Dataset columns:
  query            stringlengths    9 to 3.4k
  document         stringlengths    9 to 87.4k
  metadata         dict
  negatives        sequencelengths  4 to 101
  negative_scores  sequencelengths  4 to 101
  document_score   stringlengths    3 to 10
  document_rank    stringclasses    102 values
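The sample rows below list the fields in this order; the long fields (negatives, negative_scores) are shown as raw arrays. For orientation, a minimal sketch of loading a dataset with this schema through the Hugging Face datasets library and inspecting one row; the repository path is a placeholder, not the actual dataset name, and the field names follow the column list above.

from datasets import load_dataset

# Placeholder repository path -- substitute the real dataset location.
ds = load_dataset("your-org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                          # natural-language request
print(row["document"][:200])                 # labeled positive code snippet
print(len(row["negatives"]))                 # hard negatives, 4 to 101 per row
print(row["document_score"], row["document_rank"])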
query: Print the data in slice iz, row ix of an image to standard out.
document:
def print_image_col(input, ix=0, iz=0):
    image=get_image(input)
    nx = image.get_xsize()
    ny = image.get_ysize()
    nz = image.get_zsize()
    print "(z = %d slice, x = %d row)" % (iz, ix)
    line = []
    for iy in xrange(ny):
        line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
        if ((iy + 1) % 5 == 0): line.append("\n ")
    line.append("\n")
    print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = 
image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % 
(dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def printImage(imageObject):\n # TODO\n pass", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def print_info(self, i):\n\n 
im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def print_array(x, idx=slice(None), message=None, message_prefix=\"SHIM - \",\n file=sys.stdout):\n return set_subtensor(x[idx],\n print(x[idx],\n message=message,\n message_prefix=message_prefix,\n file=file\n )\n )", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def collatz_print(w, i, j, v):\n\tw.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, 
\"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def print_row(row,writer,x):\n sys.stdout.write(unichr(0x2503))\n for n in xrange(row.shape[0]-1):\n writer(row[n],Width,(x,n))\n sys.stdout.write(unichr(0x2502))\n if row.shape[0] > 0:\n writer(row[-1],Width,(x,row.shape[0]-1))\n sys.stdout.write(unichr(0x2503) + '\\n')", "def print(self):\n self.__print_local(self.dataset, 0)", "def print_wrapped(data, ncols=3):\r\n nrows = len(data)\r\n labels = data.index\r\n n_split_rows = int(np.ceil(nrows / ncols))\r\n for r in range(0, nrows, ncols):\r\n for c in range(ncols):\r\n try:\r\n numstr = '{}'.format(data[r + c])\r\n tabs = [' '] * (20 - len(labels[r + c]) - len(numstr))\r\n print(labels[r + c] + \"\".join(tabs) + numstr, end='\\t')\r\n except:\r\n pass\r\n print()", "def display(self):\n for row in self.tile_rows:\n print(row)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print 
('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def display(self):\n for row in range(self.height):\n for col in range(self.width):\n char = '#' if self.pixels[row * self.width + col] else '.'\n print(char, end='')\n print()\n print()", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def collatz_print (w, i, j, v) :\n w.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def show_np(mat):\n for x in range(15):\n for y in range(15):\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif mat[x, y, 0] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif mat[x, y, 1] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def info(self):\n\n print(\"pixellisation:\", 
self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def print_azeltables(inviews, ic):\n for i in range(0, len(inviews)):\n print \" \"\n print \"Az/El for inview %s to %s\" % (inviews[i][0], inviews[i][1])\n azels = ic.compute_azels(inviews[i][0], inviews[i][1], 15)\n for j in range(0, len(azels)):\n print \"At %s, azimuth=%8.2f, elevation=%8.2f\" % \\\n (azels[j][0], azels[j][1], azels[j][2])", "def printImage(currentImage):\n\tprint currentImage + ' is set to be printed...'", "def __printRow(self, i):\n if i < len(self.__data) and i >= self.__firstShownLine and \\\n i < self.__firstShownLine + self.height - 2:\n text = self.__formatString % self.__data[i]\n self._window.addnstr(i - self.__firstShownLine + 1, 1, text,\n self.width - 2)", "def print_bitmap(self, w, h, image):\n\n bitmap = self._pack_bitmap(w, h, image)\n\n row_bytes = (w + 7) // 8 # Round up to next byte boundary\n\n if row_bytes >= 48:\n row_bytes_clipped = 48\n else:\n row_bytes_clipped = row_bytes # 384 pixels max width\n\n # Est. max rows to write at once, assuming 256 byte printer buffer.\n if self._dtr_enabled:\n chunk_height_limit = 255 # Buffer doesn't matter, handshake!\n else:\n chunk_height_limit = 256 // row_bytes_clipped\n if chunk_height_limit > self._max_chunk_height:\n chunk_height_limit = self._max_chunk_height\n elif chunk_height_limit < 1:\n chunk_height_limit = 1\n\n row_start = 0\n i = 0\n while row_start < h:\n # Issue up to chunkHeightLimit rows at a time:\n chunk_height = h - row_start\n if chunk_height > chunk_height_limit:\n chunk_height = chunk_height_limit\n\n self.write(self.ASCII_DC2, '*', chunk_height, row_bytes_clipped)\n\n y = 0\n while y < chunk_height:\n x = 0\n while x < row_bytes_clipped:\n self.timeout_wait()\n self._send_to_printer(int(bitmap[i]))\n x += 1\n i += 1\n\n y += 1\n\n i += row_bytes - row_bytes_clipped\n\n self.timeout_set(chunk_height * self._dot_print_time)\n\n row_start += chunk_height_limit\n\n self._prev_byte = '\\n'", "def myprint(dataset, indent=0):\n dont_print = ['Pixel Data', 'File Meta Information Version']\n\n indent_string = \" \" * indent\n next_indent_string = \" \" * (indent + 1)\n\n for data_element in dataset:\n if data_element.VR == \"SQ\": # a sequence\n print(indent_string, data_element.name)\n for sequence_item in data_element.value:\n myprint(sequence_item, indent + 1)\n print(next_indent_string + \"---------\")\n else:\n if data_element.name in dont_print:\n print(\"\"\"<item not printed -- in the \"don't print\" list>\"\"\")\n else:\n repr_value = repr(data_element.value)\n if len(repr_value) > 50:\n repr_value = repr_value[:50] + \"...\"\n print(\"{0:s} {1:s} = {2:s}\".format(indent_string,\n data_element.name,\n repr_value))", "def display(self):\n for row0 in range(self.y):\n print()\n for row in range(self.height):\n for column0 in range(self.x):\n print(\" \", end=\"\")\n for column in range(self.width):\n print(\"#\", end=\"\")\n print()", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def 
printdata(self,whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.printdata(whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def displayData(indices_to_display = None):\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0], icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n\n big_picture = (big_picture * 255).astype(np.int8)\n img = Image.fromarray(big_picture, mode='L')\n plt.imshow(img, cmap = cm.Greys)", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def display(self):\n for x, p in zip(self.xs, self.ps):\n print(x, p)", "def my_print(self):\n if self.__size > 0:\n for k in range(self.__position[1]):\n print()\n for i in range(self.__size):\n for j in range(self.__position[0]):\n print(\" \", end='')\n print(\"#\" * self.__size)\n else:\n print()", "def print_images(i, df):\n \n images_folder_path = \"dataset/petfinder-adoption-prediction/train_images/\"\n plt.imshow(cv2.cvtColor(cv2.imread(images_folder_path+df.filename[i]), cv2.COLOR_BGR2RGB),);\n plt.axis(\"off\");\n plt.show()", "def display(self):\n for b in range(self.y):\n print()\n for i in range(self.height):\n print(\" \" * self.x + \"#\" * self.width)", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, 
which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def display(self):\n width = 1 + max(len(self.values[s]) for s in self.boxes)\n line = 'x'.join(['-'*(width*3)]*3)\n for r in self.rows:\n print(''.join(self.values[r+c].center(width)+('|' if c in '36' else '')\n for c in self.cols))\n if r in 'CF': print(line)\n print", "def display(self):\n result = []\n horizontal_bounds, vertical_bounds = self.get_bounds()\n min_x, max_x = horizontal_bounds\n min_y, max_y = vertical_bounds\n\n # xrange is inclusive in start and exclusive in end, IE [s..e)\n # so we add +1 offset to be safe\n for y in xrange(max_y, min_y - 1, -1):\n # since we have three rows and we're still relying on print,\n # displaying gets a bit dirty\n # will get cleaner once we move to something like HTML\n row_tiles = [self.tile(x, y) for x in xrange(min_x, max_x + 1)]\n\n # now we have to print each of the three rows together.\n # zip to aggregate each of the top, middle, bottom rows\n row_lines = zip(*[str(tile).split(\"\\n\") for tile in row_tiles])\n for line in row_lines:\n result.append(\"\".join(line))\n\n return \"\\n\".join(result)", "def print_stats(cars, notcars):\n print(\"Number of car samples: {0}\".format(len(cars)))\n print(\"Number of non car samples: {0}\".format(len(notcars)))\n img = cv2.imread(cars[0])\n print(\"Image shape: {0}x{1}\".format(img.shape[0], img.shape[1]))\n print(\"Image datatype: {}\".format(img.dtype))", "def display(self):\n for i in range(self.y):\n print()\n for i in range(self.height):\n for k in range(self.x):\n print(' ', end='')\n for j in range(self.width):\n print('#', end='')\n print()", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def print_vector(self):\n print self.x, self.y, self.z", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = 
testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def printData (data):\n print(str(len(data)) + '\\t' + str(data))", "def print_data(place):\n raise NotImplementedError", "def print(self):\n print(\" 0 1 2 3 4 5 6 7 8 \")\n print(\" -------------------------\")\n for x in range(0, 9):\n print(f\"{x} | \", end=\"\")\n for y in range(0, 9):\n if self.field[x][y] == -1:\n print(\"* \", end=\"\")\n else:\n print(f\"{self.field[x][y]} \", end=\"\")\n if y % 3 == 2:\n print(\"| \", end=\"\")\n print(\"\")\n if x % 3 == 2:\n print(\" -------------------------\")", "def show_all(img, overlay=None, axis='z'):\n xlen, ylen, zlen = img.GetSize()\n all_images = []\n all_overlays = []\n if axis == 'z':\n all_images = [img[:, :, z] for z in xrange(zlen)]\n if overlay:\n all_overlays = [overlay[:, :, z] for z in xrange(zlen)]\n elif axis == 'y':\n all_images = [img[:, y, :] for y in xrange(ylen)]\n if overlay:\n all_overlays = [overlay[:, y, :] for y in xrange(ylen)]\n elif axis == 'x':\n all_images = [img[x, :, :] for x in xrange(xlen)]\n if overlay:\n all_overlays = [overlay[x, :, :] for x in xrange(xlen)]\n else:\n raise Exception('invalid axis')\n\n for i, image in enumerate(all_images):\n if overlay:\n show_one(sitk.LabelOverlay(image, all_overlays[i]))\n else:\n show_one(image)\n plt.show()", "def print(self):\n # IMPLEMENT ME\n for i in range(self.height):\n for j in range(self.width):\n print(self.board[i][j], end=\" \")\n print()\n print()", "def my_print(self):\n if self.__size is not 0:\n for ite in range(self.__position[1]):\n print()\n for ite in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.size)\n else:\n print()", "def printDataRange(matrix):\n print(\"Samples\\tMin\\tMax\\tMedian\\t10th\\t90th\")\n for i, sample in enumerate(matrix.matrix.sample_labels):\n start = matrix.matrix.sample_boundaries[i]\n end = matrix.matrix.sample_boundaries[i + 1]\n sample_matrix = matrix.matrix.matrix[..., start:end]\n print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\".format(sample, np.amin(sample_matrix),\n np.amax(sample_matrix),\n np.ma.median(sample_matrix),\n np.percentile(sample_matrix, 10),\n np.percentile(sample_matrix, 90)))", "def print_cell_information(obj_ase_cell):\n # print the lattice vectors\n print('a1=',obj_ase_cell.cell[0,:])\n print('a2=',obj_ase_cell.cell[1,:])\n print('a3=',obj_ase_cell.cell[2,:])\n for i,a in enumerate(obj_ase_cell):\n print(i,a.symbol,a.position)", "def print(self):\n for row in self.board:\n print(row)", "def dumpResults(x,y,lon,lat):\n for i in range(0,len(x)):\n print(x[i],y[i],\"lonlat\",lon[i],lat[i])\n return", "def display(self):\n for i in range(self.__y):\n print()\n for i in range(self.__height):\n print(\" \" * self.__x + \"#\" * self.__width)", "def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)", "def display(self):\n for i in range(0, len(self.top_row)):\n self.top_row[i].display()\n for i in range(0, len(self.bottom_row)):\n self.bottom_row[i].display()\n for i in range(0, len(self.left_col)):\n self.left_col[i].display()\n for i in range(0, len(self.right_col)):\n self.right_col[i].display()", "def showTensorImg(ts, title):\n img = np.transpose(ts, (1, 2, 0))\n showImg(img, title)\n return" ]
[ "0.76172984", "0.75990057", "0.7544907", "0.74450433", "0.73937047", "0.7347187", "0.7033798", "0.68403655", "0.6811467", "0.66571254", "0.6630757", "0.6279358", "0.6275334", "0.61232245", "0.60401046", "0.5973948", "0.5973784", "0.5965042", "0.59583265", "0.59162855", "0.5894761", "0.5819436", "0.5811356", "0.58048147", "0.57793635", "0.57645047", "0.5754731", "0.57335156", "0.57335156", "0.5726376", "0.5715056", "0.57111317", "0.57064384", "0.56567574", "0.5577068", "0.55756503", "0.55694", "0.55646145", "0.55592704", "0.55557805", "0.5544266", "0.5533631", "0.5533631", "0.55232507", "0.55178654", "0.5515087", "0.5503338", "0.5490018", "0.5488094", "0.5487511", "0.5482174", "0.54544526", "0.5454091", "0.54539174", "0.5419365", "0.54145783", "0.53863937", "0.53683513", "0.5363214", "0.5358269", "0.5357344", "0.5318814", "0.53133845", "0.53115493", "0.53113484", "0.5308694", "0.52823716", "0.5281389", "0.52733016", "0.5255574", "0.5242868", "0.52380145", "0.52352977", "0.5234517", "0.5230952", "0.52300847", "0.5228469", "0.5226477", "0.52176017", "0.52133596", "0.51984113", "0.51942176", "0.51879305", "0.5177211", "0.51671976", "0.51668286", "0.51616746", "0.5159799", "0.515941", "0.51542795", "0.51455265", "0.51432824", "0.51390004", "0.5137368", "0.51322716", "0.5130539", "0.51247895", "0.5123635", "0.51206356", "0.51191825" ]
document_score: 0.7349031
document_rank: 5
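document_score and document_rank appear to describe the labeled positive relative to its negatives: the score above (0.7349031) is exceeded by exactly five of the negative_scores (0.76172984 down to 0.73937047), which matches a 0-based rank of 5. A small check under that assumed reading, using the first few scores from this row:

# Assumed meaning of document_rank: the number of negatives whose retrieval
# score exceeds the positive document's score (i.e. its 0-based rank).
def assumed_rank(document_score, negative_scores):
    return sum(float(s) > float(document_score) for s in negative_scores)

scores = ["0.76172984", "0.75990057", "0.7544907", "0.74450433",
          "0.73937047", "0.7347187", "0.7033798"]    # first few of ~100
print(assumed_rank("0.7349031", scores))             # -> 5, matching document_rank

The metadata field marks each row as a (query, document, negatives) triplet. One common way to consume such triplets is an InfoNCE-style contrastive loss in which the positive document must outscore the negatives; the sketch below assumes pre-computed embeddings and is not tied to any particular encoder or to this dataset's training recipe.

import torch
import torch.nn.functional as F

def triplet_infonce(q, d_pos, d_negs, temperature=0.05):
    # q: (dim,) query embedding, d_pos: (dim,) positive, d_negs: (n_neg, dim) negatives.
    q = F.normalize(q, dim=-1)
    d_pos = F.normalize(d_pos, dim=-1)
    d_negs = F.normalize(d_negs, dim=-1)
    # Cosine similarities; the positive sits at index 0 and is the target class.
    scores = torch.cat([(q * d_pos).sum(-1, keepdim=True), d_negs @ q]) / temperature
    return F.cross_entropy(scores.unsqueeze(0), torch.zeros(1, dtype=torch.long))

q, pos, negs = torch.randn(384), torch.randn(384), torch.randn(100, 384)
print(triplet_infonce(q, pos, negs))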
query: Print the data in slice iz, column iy of an image to standard out.
document:
def print_image_row(input, iy=0, iz=0):
    image=get_image(input)
    nx = image.get_xsize()
    ny = image.get_ysize()
    nz = image.get_zsize()
    print "(z = %d slice, y = %d col)" % (iz, iy)
    line = []
    for ix in xrange(nx):
        line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
        if ((ix + 1) % 5 == 0): line.append("\n ")
    line.append("\n")
    print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): 
line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: 
{}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def collatz_print(w, i, j, v):\n\tw.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def collatz_print (w, i, j, v) :\n w.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def printImage(imageObject):\n # TODO\n pass", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name 
+ \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def display(self):\n for row in range(self.height):\n for col in range(self.width):\n char = '#' if self.pixels[row * self.width + col] else '.'\n print(char, end='')\n print()\n print()", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def show_np(mat):\n for x in range(15):\n for y in range(15):\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif mat[x, y, 0] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif mat[x, y, 1] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def 
_showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n 
offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_azeltables(inviews, ic):\n for i in range(0, len(inviews)):\n print \" \"\n print \"Az/El for inview %s to %s\" % (inviews[i][0], inviews[i][1])\n azels = ic.compute_azels(inviews[i][0], inviews[i][1], 15)\n for j in range(0, len(azels)):\n print \"At %s, azimuth=%8.2f, elevation=%8.2f\" % \\\n (azels[j][0], azels[j][1], azels[j][2])", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def print_bitmap(self, w, h, image):\n\n bitmap = self._pack_bitmap(w, h, image)\n\n row_bytes = (w + 7) // 8 # Round up to next byte boundary\n\n if row_bytes >= 48:\n row_bytes_clipped = 48\n else:\n row_bytes_clipped = row_bytes # 384 pixels max width\n\n # Est. 
max rows to write at once, assuming 256 byte printer buffer.\n if self._dtr_enabled:\n chunk_height_limit = 255 # Buffer doesn't matter, handshake!\n else:\n chunk_height_limit = 256 // row_bytes_clipped\n if chunk_height_limit > self._max_chunk_height:\n chunk_height_limit = self._max_chunk_height\n elif chunk_height_limit < 1:\n chunk_height_limit = 1\n\n row_start = 0\n i = 0\n while row_start < h:\n # Issue up to chunkHeightLimit rows at a time:\n chunk_height = h - row_start\n if chunk_height > chunk_height_limit:\n chunk_height = chunk_height_limit\n\n self.write(self.ASCII_DC2, '*', chunk_height, row_bytes_clipped)\n\n y = 0\n while y < chunk_height:\n x = 0\n while x < row_bytes_clipped:\n self.timeout_wait()\n self._send_to_printer(int(bitmap[i]))\n x += 1\n i += 1\n\n y += 1\n\n i += row_bytes - row_bytes_clipped\n\n self.timeout_set(chunk_height * self._dot_print_time)\n\n row_start += chunk_height_limit\n\n self._prev_byte = '\\n'", "def display(self):\n for row0 in range(self.y):\n print()\n for row in range(self.height):\n for column0 in range(self.x):\n print(\" \", end=\"\")\n for column in range(self.width):\n print(\"#\", end=\"\")\n print()", "def print_wrapped(data, ncols=3):\r\n nrows = len(data)\r\n labels = data.index\r\n n_split_rows = int(np.ceil(nrows / ncols))\r\n for r in range(0, nrows, ncols):\r\n for c in range(ncols):\r\n try:\r\n numstr = '{}'.format(data[r + c])\r\n tabs = [' '] * (20 - len(labels[r + c]) - len(numstr))\r\n print(labels[r + c] + \"\".join(tabs) + numstr, end='\\t')\r\n except:\r\n pass\r\n print()", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def display(self):\n for row in self.tile_rows:\n print(row)", "def display(self):\n for i in range(self.y):\n print()\n for i in range(self.height):\n for k in range(self.x):\n print(' ', end='')\n for j in range(self.width):\n print('#', end='')\n print()", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def display(self):\n for b in range(self.y):\n print()\n for i in range(self.height):\n print(\" \" * self.x + \"#\" * self.width)", "def display(self):\n width = 1 + max(len(self.values[s]) for s in 
self.boxes)\n line = 'x'.join(['-'*(width*3)]*3)\n for r in self.rows:\n print(''.join(self.values[r+c].center(width)+('|' if c in '36' else '')\n for c in self.cols))\n if r in 'CF': print(line)\n print", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def header(self):\n print 'dimensions',self.data.shape\n print 'llcorner', self.xllcorner, self.yllcorner\n print 'cell size', self.cellsize", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)", "def print(self):\n self.__print_local(self.dataset, 0)", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def output(self):\n width = \" \" # 6 spaces formatting\n print(\"\\n\\n\")\n for row in range(self._length, -1, -1):\n if row != 0:\n print(row, end = width)\n for col in range(0, self._length):\n #print(self.board[col][row - 1], end = width)\n self.board[col][row-1].output(width)\n print(\"\\n\\n\")\n else:\n 
print(width, end=\" \")\n for col in self.columns:\n print(col, end = width)\n print(\"\\n\\n\")", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def info(self):\n\n\t\tprint(\"Pixels on a side: {0}\".format(self.data.shape[0]))\n\t\tprint(\"Pixel size: {0}\".format(self.resolution))\n\t\tprint(\"Total angular size: {0}\".format(self.side_angle))\n\t\tprint(\"lmin={0:.1e} ; lmax={1:.1e}\".format(self.lmin,self.lmax))", "def displayData(indices_to_display = None):\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0], icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n\n big_picture = (big_picture * 255).astype(np.int8)\n img = Image.fromarray(big_picture, mode='L')\n plt.imshow(img, cmap = cm.Greys)", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def display(self):\n for i in range(self.__y):\n print()\n for i in range(self.__height):\n print(\" \" * self.__x + \"#\" * self.__width)", "def write_window(img, ds, window):\n new_img = np.array([img[:, :, i] for i in range(img.shape[2])])\n ds.write(new_img, window=window)", "def print_images(i, df):\n \n images_folder_path = \"dataset/petfinder-adoption-prediction/train_images/\"\n plt.imshow(cv2.cvtColor(cv2.imread(images_folder_path+df.filename[i]), cv2.COLOR_BGR2RGB),);\n plt.axis(\"off\");\n plt.show()", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def print(self):\n print(\" 0 1 2 3 4 5 6 7 8 \")\n print(\" -------------------------\")\n for x in range(0, 9):\n print(f\"{x} | \", end=\"\")\n for y in range(0, 9):\n if self.field[x][y] == -1:\n print(\"* \", end=\"\")\n else:\n print(f\"{self.field[x][y]} \", end=\"\")\n if y % 3 == 2:\n print(\"| \", end=\"\")\n print(\"\")\n if x % 3 == 2:\n print(\" -------------------------\")", "def print_data(place):\n raise NotImplementedError", "def display(self):\n [print() for i in range(self.__y)]\n for i in range(self.__height):\n [print(\" \", end=\"\") for i in range(self.__x)]\n for j in range(self.__width):\n print(\"#\", end=\"\")\n print()", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def __str__(self):\n result = ''\n for row in range(self.getHeight()):\n for col in range(self.getWidth()):\n 
result += str(self.data[row][col]) + ' '\n result += '\\n'\n return result", "def print(self):\n # IMPLEMENT ME\n for i in range(self.height):\n for j in range(self.width):\n print(self.board[i][j], end=\" \")\n print()\n print()", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def print(self):\r\n base = 8 * self.width\r\n print(base * \"-\")\r\n for x in range(self.height):\r\n output = \"\"\r\n for y in range(self.width):\r\n output = output + self.board[x][y] + \"|\"\r\n print(\"|\" + output)\r\n print(base * \"-\")", "def show_slices(slices):\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")", "def display(self):\n result = []\n horizontal_bounds, vertical_bounds = self.get_bounds()\n min_x, max_x = horizontal_bounds\n min_y, max_y = vertical_bounds\n\n # xrange is inclusive in start and exclusive in end, IE [s..e)\n # so we add +1 offset to be safe\n for y in xrange(max_y, min_y - 1, -1):\n # since we have three rows and we're still relying on print,\n # displaying gets a bit dirty\n # will get cleaner once we move to something like HTML\n row_tiles = [self.tile(x, y) for x in xrange(min_x, max_x + 1)]\n\n # now we have to print each of the three rows together.\n # zip to aggregate each of the top, middle, bottom rows\n row_lines = zip(*[str(tile).split(\"\\n\") for tile in row_tiles])\n for line in row_lines:\n result.append(\"\".join(line))\n\n return \"\\n\".join(result)", "def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()", "def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n pl.ylabel('Pixels')\n pl.grid(color='yellow')", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def imdisplay(filename, representation):\n image = read_image(filename, representation)\n\n if representation == GRAY_OUT:\n plt.imshow(image, cmap='gray')\n else:\n plt.imshow(image)\n\n plt.show()", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def print_vector(self):\n print self.x, self.y, self.z", "def print_array(x, idx=slice(None), message=None, message_prefix=\"SHIM - \",\n file=sys.stdout):\n return set_subtensor(x[idx],\n print(x[idx],\n message=message,\n message_prefix=message_prefix,\n file=file\n )\n )", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def print_cell_information(obj_ase_cell):\n # print the lattice 
vectors\n print('a1=',obj_ase_cell.cell[0,:])\n print('a2=',obj_ase_cell.cell[1,:])\n print('a3=',obj_ase_cell.cell[2,:])\n for i,a in enumerate(obj_ase_cell):\n print(i,a.symbol,a.position)", "def print_row(row,writer,x):\n sys.stdout.write(unichr(0x2503))\n for n in xrange(row.shape[0]-1):\n writer(row[n],Width,(x,n))\n sys.stdout.write(unichr(0x2502))\n if row.shape[0] > 0:\n writer(row[-1],Width,(x,row.shape[0]-1))\n sys.stdout.write(unichr(0x2503) + '\\n')", "def show_slices(self, slices):\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")" ]
[ "0.77487314", "0.75519806", "0.750461", "0.73919225", "0.7150529", "0.7141496", "0.7027633", "0.6667874", "0.66633767", "0.6654244", "0.6348636", "0.621092", "0.6179944", "0.60063565", "0.59464717", "0.59386533", "0.5911687", "0.58938134", "0.5851181", "0.58338124", "0.58030814", "0.580005", "0.5770217", "0.57680136", "0.57322097", "0.5687016", "0.56508046", "0.56508046", "0.56486964", "0.5631367", "0.56241935", "0.56231993", "0.56194925", "0.5618533", "0.5608584", "0.5608584", "0.56033283", "0.55967665", "0.5579285", "0.55709326", "0.55670226", "0.5565117", "0.55197614", "0.5506052", "0.54795337", "0.5455547", "0.54323965", "0.5431751", "0.5415518", "0.54134214", "0.54053307", "0.5384232", "0.53834504", "0.53793746", "0.53791755", "0.53523403", "0.5348986", "0.53487146", "0.5321047", "0.5319799", "0.53061527", "0.52997816", "0.52917933", "0.5286587", "0.5281699", "0.5278258", "0.52758783", "0.5273376", "0.52664584", "0.52657247", "0.525592", "0.5242683", "0.5238663", "0.5227683", "0.5221043", "0.5217919", "0.5201372", "0.5196144", "0.51943666", "0.5190706", "0.5189621", "0.51893604", "0.5183344", "0.51832896", "0.5175574", "0.51731855", "0.5168432", "0.5162843", "0.5161328", "0.51551276", "0.5155126", "0.514921", "0.5142674", "0.5141334", "0.5140754", "0.5138634", "0.51380163", "0.51371795", "0.5120729", "0.5120167" ]
0.7496404
3
Print the data in slice iz of an image to standard out in a format that agrees with v2
def print_image_slice(input, iz=0):
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice)" % (iz)
	line = []
	for iy in xrange(ny-1,-1,-1):
		line.append("Row ")
		line.append("%4i " % iy)
		for ix in xrange(nx):
			line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
			if ((ix + 1) % 5 == 0):
				line.append("\n ")
				line.append(" ")
		line.append("\n")
		if(nx%5 != 0): line.append("\n")
	print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint 
\"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def printMat(image):\n for row in range(image.rows):\n print 
\"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def printImage(imageObject):\n # TODO\n pass", "def __writeImageBytes(self, image):\n\n if not image:\n raise Exception(\"image not found\")\n result = []\n for i, b in enumerate(image):\n if i % 39 == 0:\n result.append(\"\\n\")\n result.append(f\"{b:02X}\")\n return \"\".join(result)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n 
self.showAsString(data)\n else:\n self.showAsString(data)", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def _part_generate_repr(img, steps, params, featsel, filt_l, legacy=True, debug=False):\n\n assert img.ndim == 2 or img.ndim == 3, \"must be gray or RGB\"\n\n if 'preproc_resize' in steps:\n img = _preproc_resize(img, params['preproc']['max_edge'], legacy)\n orig_imga = img.copy()\n if debug:\n print(\"orig_imga, mean {}, std {}\".format(orig_imga.mean(), orig_imga.std()))\n # convert image into gray scale, 2 dim array.\n response = img\n # this is the established way to deal with images for legacy behavior.\n if legacy:\n response = img.astype(np.float32) / 255.0\n\n if response.ndim == 3:\n response = 0.2989 * response[:, :, 0] + 0.5870 * response[:, :, 1] + 0.1140 * response[:, :, 2]\n\n assert response.ndim == 2, \"must be two channels!\"\n # in case we still have float64 dtype, in the case of non legacy\n response = response.astype(np.float32, copy=True)\n assert response.dtype == np.float32\n if debug:\n print(\"imga0, mean {}, std {}\".format(response.mean(), response.std()))\n\n if 
'preproc_lowpass' in steps:\n response = _preproc_lowpass(response, params['preproc']['lsum_ksize'], legacy)\n imga0 = response.copy()\n if debug:\n print(\"imga0 normalized, mean {}, std {}\".format(imga0.mean(), imga0.std()))\n\n if 'normin' in steps:\n response = _normin(response, params['normin'], legacy)\n imga1 = response.copy()\n if debug:\n print(\"imga1, shape{}, mean {}, std {}\".format(imga1.shape, imga1.mean(), imga1.std()))\n\n if 'filter' in steps:\n response = _filter(response, filt_l, legacy, mode=params['filter']['mode'])\n else:\n response = response[:, :, np.newaxis]\n # make sure it's 3d.\n assert response.ndim == 3, \"must have a 3d response array\"\n imga2 = response.copy()\n if debug:\n print(\"imga2, shape {}, mean {}, std {}\".format(imga2.shape, imga2.mean(), imga2.std()))\n\n if 'activ' in steps:\n # 'type': 'clamp', # can also be `square`, `exp`, `recsquare`, `rec`\n response = _activ(response, params['activ'])\n imga3 = response.copy()\n if debug:\n print(\"imga3, shape {}, mean {}, std {}\".format(imga3.shape, imga3.mean(), imga3.std()))\n\n if 'normout' in steps:\n response = _normin(response, params['normout'], legacy)\n imga4 = response.copy()\n if debug:\n print(\"imga4, shape {}, mean {}, std {}\".format(imga4.shape, imga4.mean(), imga4.std()))\n\n if 'dimr' in steps:\n response = _dimr(response, params['dimr']['lsum_ksize'], params['dimr']['outshape'], legacy)\n if debug:\n print(\"output, shape {}, mean {}, std {}\".format(response.shape, response.mean(), response.std()))\n images = {'imga0': imga0,\n 'imga1': imga1,\n 'imga2': imga2,\n 'imga3': imga3,\n 'imga4': imga4,\n 'orig_imga': orig_imga}\n\n # handle additional features.\n # pure legacy functions.\n fvector = handle_feature_selection(response, images, featsel)\n return fvector", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def debug_image(self, state_index: int = -1):\n image = self.make_image(state_index, channel_type=\"n\")\n 
return np.array([np.sum(arr) for arr in image])[3:].reshape(8, 12)", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def to_nii(self, outbase, spirec='spirec', saveInOut=False):\n if self.image_data is None:\n self.recon(spirec)\n\n image_tlhc = np.array([self.header.image.tlhc_R, self.header.image.tlhc_A, self.header.image.tlhc_S])\n image_trhc = np.array([self.header.image.trhc_R, self.header.image.trhc_A, self.header.image.trhc_S])\n image_brhc = np.array([self.header.image.brhc_R, self.header.image.brhc_A, self.header.image.brhc_S])\n #image_cent = np.array([self.header.image.ctr_R, self.header.image.ctr_A, self.header.image.ctr_S])\n\n row_vec = (image_trhc-image_tlhc)/np.sqrt(np.dot(image_trhc-image_tlhc, image_trhc-image_tlhc))\n col_vec = -(image_trhc-image_brhc)/np.sqrt(np.dot(image_trhc-image_brhc, image_trhc-image_brhc))\n # The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll\n # need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them\n # such that row_vec points to the right and col_vec points up.\n # Not sure if we need to negate the slice_norm. From the NIFTI-1 header:\n # The third column of R will be either the cross-product of the first 2 columns or\n # its negative. It is possible to infer the sign of the 3rd column by examining\n # the coordinates in DICOM attribute (0020,0032) \"Image Position (Patient)\" for\n # successive slices. 
However, this method occasionally fails for reasons that I\n # (RW Cox) do not understand.\n\n # can also get slice_norm from: slice_norm = np.cross(row_vec, col_vec)\n slice_norm = np.array([self.header.image.norm_R, self.header.image.norm_A, self.header.image.norm_S])\n slice_fov = np.abs(self.header.series.start_loc - self.header.series.end_loc)\n\n # This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?\n # And is it related to wheather I have to negate the slice_norm?\n # Tuned this empirically by comparing spiral and EPI data with the sam Rx.\n # Everything seems reasonable, except the test for axial orientation (start_ras==S|I).\n # I have no idea why I need that! But the flipping only seems necessary for axials, not\n # coronals or the few obliques I've tested.\n # FIXME: haven't tested sagittals! (to test for spiral: 'sprt' in self.psd_name.lower())\n if (self.header.series.start_ras=='S' or self.header.series.start_ras=='I') and self.header.series.start_loc > self.header.series.end_loc:\n pos = image_tlhc - slice_norm*slice_fov\n # FIXME: since we are reversing the slice order here, should we change the slice_order field below?\n self.image_data = self.image_data[:,:,::-1,]\n if self.fm_data is not None:\n self.fm_data = self.fm_data[:,:,::-1,]\n else:\n pos = image_tlhc\n\n if self.num_bands > 1:\n pos = pos - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0\n\n qto_xyz = np.zeros((4,4))\n qto_xyz[0,0] = row_vec[0]\n qto_xyz[0,1] = col_vec[0]\n qto_xyz[0,2] = slice_norm[0]\n\n qto_xyz[1,0] = row_vec[1]\n qto_xyz[1,1] = col_vec[1]\n qto_xyz[1,2] = slice_norm[1]\n\n qto_xyz[2,0] = row_vec[2]\n qto_xyz[2,1] = col_vec[2]\n qto_xyz[2,2] = slice_norm[2]\n\n qto_xyz[:,3] = np.append(pos, 1).T\n qto_xyz[0:3,0:3] = np.dot(qto_xyz[0:3,0:3], np.diag(self.mm_per_vox))\n\n nii_header = nibabel.Nifti1Header()\n nii_header.set_xyzt_units('mm', 'sec')\n nii_header.set_qform(qto_xyz, 'scanner')\n nii_header.set_sform(qto_xyz, 'scanner')\n\n nii_header['slice_start'] = 0\n nii_header['slice_end'] = self.num_slices - 1\n # nifti slice order codes: 0 = unknown, 1 = sequential incrementing, 2 = seq. dec., 3 = alternating inc., 4 = alt. dec.\n slice_order = 0\n nii_header['slice_duration'] = self.tr * 1000 / self.num_slices\n # FIXME: check that this is correct.\n if self.header.series.se_sortorder == 0:\n slice_order = 1 # or 2?\n elif self.header.series.se_sortorder == 1:\n slice_order = 3 # or 4?\n nii_header['slice_code'] = slice_order\n\n # Note: the freq/phase dir isn't meaningful for spiral trajectories.\n if self.header.image.freq_dir==1:\n nii_header.set_dim_info(freq=1, phase=0, slice=2)\n else:\n nii_header.set_dim_info(freq=0, phase=1, slice=2)\n\n # FIXME: There must be a cleaner way to set the TR! 
Maybe bug Matthew about it.\n nii_header.structarr['pixdim'][4] = self.tr\n nii_header.set_slice_duration(nii_header.structarr['pixdim'][4] / self.num_slices)\n nii_header.structarr['cal_max'] = self.image_data.max()\n nii_header.structarr['cal_min'] = self.image_data.min()\n\n if self.num_echoes == 1:\n nifti = nibabel.Nifti1Image(self.image_data, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n elif self.num_echoes == 2:\n if saveInOut:\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,0], None, nii_header)\n nibabel.save(nifti, outbase + '_in.nii.gz')\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,1], None, nii_header)\n nibabel.save(nifti, outbase + '_out.nii.gz')\n # FIXME: Do a more robust test for spiralio!\n # Assume spiralio, so do a weighted average of the two echos.\n # FIXME: should do a quick motion correction here\n w_in = np.mean(self.image_data[:,:,:,:,0], 3)\n w_out = np.mean(self.image_data[:,:,:,:,1], 3)\n inout_sum = w_in + w_out\n w_in = w_in / inout_sum\n w_out = w_out / inout_sum\n avg = np.zeros(self.image_data.shape[0:4])\n for tp in range(self.image_data.shape[3]):\n avg[:,:,:,tp] = w_in*self.image_data[:,:,:,tp,0] + w_out*self.image_data[:,:,:,tp,1]\n nifti = nibabel.Nifti1Image(avg, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n else:\n for echo in range(self.num_echoes):\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,echo], None, nii_header)\n nibabel.save(nifti, outbase + '_echo%02d.nii.gz' % echo)\n\n if self.fm_data is not None:\n nii_header.structarr['cal_max'] = self.fm_data.max()\n nii_header.structarr['cal_min'] = self.fm_data.min()\n nifti = nibabel.Nifti1Image(self.fm_data, None, nii_header)\n nibabel.save(nifti, outbase + '_B0.nii.gz')", "def create_displayable_test_output(test_image):\n if hasattr(test_image, \"numpy\"):\n return np.squeeze(test_image.numpy())[:, :, 1:]\n else:\n return np.squeeze(test_image)[:, :, 1:]", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def print_bitmap(self, w, h, image):\n\n bitmap = self._pack_bitmap(w, h, image)\n\n row_bytes = (w + 7) // 8 # Round up to next byte boundary\n\n if row_bytes >= 48:\n row_bytes_clipped = 48\n else:\n row_bytes_clipped = row_bytes # 384 pixels max width\n\n # Est. 
max rows to write at once, assuming 256 byte printer buffer.\n if self._dtr_enabled:\n chunk_height_limit = 255 # Buffer doesn't matter, handshake!\n else:\n chunk_height_limit = 256 // row_bytes_clipped\n if chunk_height_limit > self._max_chunk_height:\n chunk_height_limit = self._max_chunk_height\n elif chunk_height_limit < 1:\n chunk_height_limit = 1\n\n row_start = 0\n i = 0\n while row_start < h:\n # Issue up to chunkHeightLimit rows at a time:\n chunk_height = h - row_start\n if chunk_height > chunk_height_limit:\n chunk_height = chunk_height_limit\n\n self.write(self.ASCII_DC2, '*', chunk_height, row_bytes_clipped)\n\n y = 0\n while y < chunk_height:\n x = 0\n while x < row_bytes_clipped:\n self.timeout_wait()\n self._send_to_printer(int(bitmap[i]))\n x += 1\n i += 1\n\n y += 1\n\n i += row_bytes - row_bytes_clipped\n\n self.timeout_set(chunk_height * self._dot_print_time)\n\n row_start += chunk_height_limit\n\n self._prev_byte = '\\n'", "def long_slice(image_data):\n\n\t# Process binary data and open.\n\tim = image_data.split('base64,')[1]\n\tim = base64.b64decode(im)\n\tim = io.BytesIO(im)\n\t\n\timg = Image.open(im)\n\twidth, height = img.size\n\tupper = 0\n\tleft = 0\n\t\n\t# Max height to fit pdf.\n\tmax_height_mm = 198\n\tmax_height = (max_height_mm * 96) / 25.4\n\n\tslice_size = max_height\n\n\tslices = int(math.ceil(height/slice_size))\n\tcount = 1\n\n\tfinal_slices = []\n\tfor slice in range(slices):\n\t\t# If no more slices needed, set the lower bound to bottom of image.\n\t\tif count == slices:\n\t\t\tlower = height\n\t\telse:\n\t\t\tlower = int(count * slice_size) \n\t\t \n\t\t# Set the bounding box. \n\t\tbbox = (left, upper, width, lower)\n\t\tworking_slice = img.crop(bbox)\n\n\t\t# Save png as bytes object.\n\t\tbyte_io = io.BytesIO()\n\t\tworking_slice.save(byte_io, 'png')\n\t\t\n\t\t# Convert bytes object to base64 string and save to list.\n\t\timg_str = base64.b64encode(byte_io.getvalue())\n\t\timg_str = 'data:image/png;base64,' + img_str.decode()\n\t\tfinal_slices.append(img_str)\n\n\t\tupper = upper + slice_size\n\t\tcount = count + 1\n\n\treturn final_slices", "def muestraPokemon(bytes):\n image = Image.open(io.BytesIO(bytes))\n data = np.array(image)\n plt.imshow(data)\n plt.axis('off')\n plt.show()", "def convertData(img):\n dataset = []\n for i in img:\n dataset.append(format(ord(i), '08b'))\n return dataset", "def img_to_ascii(**kwargs):\n ascii_chars = [ u'Z', u'Q', u'T', u'W', u'E', u'K', u'P', u'L', u'I', u'C', u'Y']\n \n width = kwargs.get('width',200)\n path = kwargs.get('path',None)\n\n\n\n im = Image.open(path)\n\n im = resize(im,width)\n\n # w,h = im.size\n\n # this is used as storage. It stores the original picture's color values\n objToGo = list(im.convert(\"RGBA\").getdata())\n\n im = im.convert(\"L\") # convert to grayscale\n\n imlist = list(im.getdata())\n\n i = 0\n j = 0\n # chList is the characters that will be printed. 
It is a 2D array\n chList = []\n chList.append([])\n for val in imlist:\n ch = ascii_chars[val // 25] #.decode('utf-8')\n chList[j].append(ch)\n sys.stdout.write(ch)\n i += 1\n if i % width == 0:\n sys.stdout.write(\"\\n\")\n chList.append([])\n j += 1\n i = 0\n\n return chList,objToGo", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text", "def get_itk_data(path_or_image, verbose=False):\n\n if isinstance(path_or_image, str):\n image = get_itk_image(path_or_image)\n else:\n image = path_or_image\n\n arr = itk.GetArrayFromImage(image)\n shape = arr.shape\n spacing = image.GetSpacing()[::-1]\n data_type = arr.dtype\n\n if verbose:\n print '\\t image shape: ' + str(shape)\n print '\\t image spacing: ' + str(spacing)\n print '\\t image data type: ' + str(data_type)\n\n return arr, shape, spacing", "def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)", "def __str__(self):\n out = ' '\n for j in range(self.width):\n out += str(j + 1) + ' '\n out += '\\n'\n for i in range(self.height):\n out += '\\n'\n out += str(i + 1) + ' '\n for j in range(self.width):\n out += self.data[i][j] + ' '\n return out", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def get_image_summary(img, idx=0):\n\n V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))\n V -= tf.reduce_min(V)\n V /= tf.reduce_max(V)\n V *= 255\n\n img_w = tf.shape(img)[1]\n img_h = tf.shape(img)[2]\n V = tf.reshape(V, tf.stack((img_w, img_h, 1)))\n V = tf.transpose(V, (2, 0, 1))\n V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))\n return V", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def print_image_info(self):\r\n\r\n maxt = np.max(self.times)\r\n\r\n print (\" Duration of Image Stack: %9.3f s (%8.3f min) period = %8.3f s\" % (maxt, maxt/60.0, self.period))\r\n\r\n print (' Image shape: ', self.imageData.shape)\r\n\r\n print (' nFrames: %d framerate: %9.3f\\n' % (self.nFrames, self.framerate))", "def print_data(self, data):\n self.print_indicator = True\n self.imagedata = data\n self.setImage(self.imagedata)\n\n self.indicator_min = -200\n self.indicator_max = 200\n\n if self.video_model != None:\n pos = 
int(self.video_model.get_pos(datatype = \"motion\"))\n self.indicator = self.view.plot([pos,pos],[self.indicator_min,self.indicator_max],pen=pyqtgraph.mkPen(color=pyqtgraph.hsvColor(2),width=1))", "def printFactoryImageStruct(processedImagePath, trailerSize, numLinesImageContent, descripFixedSize):\n\n cuttingLine = \"--------\"\n subCuttingLine = \"-----\"\n\n # print magic code\n with open(processedImagePath, \"rb\") as f:\n print(cuttingLine + subCuttingLine + \"Magic Code\" + subCuttingLine + cuttingLine)\n byte = f.read(8)\n print(byte)\n\n # print [ota_descriptor + image content]\n printOTADescriptorImageStruct(processedImagePath, numLinesImageContent, 8)\n\n # print trailer\n fSize = getFileSize(processedImagePath)\n f = open(processedImagePath, \"rb\")\n f.seek(fSize - trailerSize)\n\n print(cuttingLine + cuttingLine + \"Trailer\" + cuttingLine + cuttingLine)\n\n try:\n print(subCuttingLine + \" signature type \" + subCuttingLine)\n byte = f.read(descripFixedSize)\n print(byte)\n\n print(subCuttingLine + \" signature size \" + subCuttingLine)\n byte = f.read(4)\n print(format32BitHexStr(hex(struct.unpack('<I', byte)[0])))\n\n byte = f.read()\n print(subCuttingLine + \" signature \" + subCuttingLine)\n print(byte)\n\n finally:\n f.close()", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\tprint(data_chunk.shape)\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def showTensorImg(ts, title):\n img = np.transpose(ts, (1, 2, 0))\n showImg(img, title)\n return", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def get_image():\r\n\t\treturn \"\\n\".join(\"\".join(row) for row in picture)", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram = \"\"\n\n if size_in_bytes < 
100 * 1024 * 1024:\n if not labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def array2ipl(img): \n img_new = cv.CreateImageHeader((img.shape[1], img.shape[0]), cv.IPL_DEPTH_8U, 3)\n cv.SetData(img_new, img.copy().data,img.dtype.itemsize*3*img.shape[1])\n img_new[50,75]\n return img_new", "def pildumps(image, format=\"PNG\"):\n result = StringIO.StringIO()\n if image.dtype in [np.dtype('f'), np.dtype('d')]:\n assert np.amin(image) > -0.001 and np.amax(image) < 1.001\n image = np.clip(image, 0.0, 1.0)\n image = np.array(image * 255.0, 'uint8')\n PIL.Image.fromarray(image).save(result, format=format)\n return result.getvalue()", "def picture_binary(runtime_addr, n=1):\n\n set_formatter(runtime_addr, n, mainformatter.picture_binary_formatter)", "def DebugFormat(self):\n print FormatAsBits((self.output, self.out_boff))\n for i in xrange(self.idx_byte*8 + self.idx_boff - 1):\n if not i % 8:\n sys.stdout.write(\"|\")\n sys.stdout.write(\"-\")\n print \"^\"", "def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()", "def print_image_info(image, resize=rsz_default, kernel=kernel_size):\n\tprint \"Image Size: {0}\".format(image.shape)\n\tprint \"Image Max: {0}\".format(image.max())\n\tprint \"Image Min: {0}\".format(image.min())\n\tprint \"Image Mean: {0}\".format(image.mean())\n\tprint \"Image dtype: {0}\\n\".format(image.dtype)\n\timage = to_uint8(image)\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\tsecond_m = ['m20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03']\n\tprint \"Zero Order Moment: {0}\".format(M['m00'])\n\tprint \"First Order Moments: {0}, {1}\".format(M['m10'], M['m01'])\n\tprint \"Second Order Moments:\"\n\tsecond_m_str = ''\n\tfor m2 in second_m:\n\t\tsecond_m_str += \"{0},\".format(M[m2])\n\tprint second_m_str[:-1]", "def observation(self, img):\r\n img = img.transpose(1, 2, 0)\r\n return img", "def printData (data):\n print(str(len(data)) + '\\t' + str(data))", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def showBPImg(pV,nV):\n # object 
arrays of the positive and negative images\n inv_crop = np.empty(8, dtype=object)\n inv_crop2 = np.empty(8, dtype=object)\n for t in range(8):\n # backprojection functions\n inverse = retina.inverse(pV[:,t,:],x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True)\n inv_crop[t] = retina.crop(inverse,x,y,dloc[i])\n inverse2 = retina.inverse(nV[:,t,:],x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True)\n inv_crop2[t] = retina.crop(inverse2,x,y,dloc[i])\n # place descriptions\n cv2.putText(inv_crop[t],types[t] + \" + \",(xx,yy), font, 1,(0,255,255),2)\n cv2.putText(inv_crop2[t],types[t] + \" - \",(xx,yy), font, 1,(0,255,255),2)\n # stack all images into a grid\n posRG = np.vstack((inv_crop[:4]))\n negRG = np.vstack((inv_crop2[:4]))\n posYB = np.vstack((inv_crop[4:]))\n negYB = np.vstack((inv_crop2[4:]))\n merge = np.concatenate((posRG,negRG,posYB,negYB),axis=1)\n return merge", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def info(self):\n\n\t\tprint(\"Pixels on a side: {0}\".format(self.data.shape[0]))\n\t\tprint(\"Pixel size: {0}\".format(self.resolution))\n\t\tprint(\"Total angular size: {0}\".format(self.side_angle))\n\t\tprint(\"lmin={0:.1e} ; lmax={1:.1e}\".format(self.lmin,self.lmax))", "def print_wrapped(data, ncols=3):\r\n nrows = len(data)\r\n labels = data.index\r\n n_split_rows = int(np.ceil(nrows / ncols))\r\n for r in range(0, nrows, ncols):\r\n for c in range(ncols):\r\n try:\r\n numstr = '{}'.format(data[r + c])\r\n tabs = [' '] * (20 - len(labels[r + c]) - len(numstr))\r\n print(labels[r + c] + \"\".join(tabs) + numstr, end='\\t')\r\n except:\r\n pass\r\n print()", "def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )", "def img_to_slices(img):\n res = []\n\n for i, slice_img in enumerate(img):\n res.append(slice_img)\n return res", "def pngxy(data):\n ihdr = data.index(b'IHDR')\n # next 8 bytes are width/height\n w4h4 = data[ihdr+4:ihdr+12]\n return struct.unpack('>ii', w4h4)", "def print_stats(cars, notcars):\n print(\"Number of car samples: {0}\".format(len(cars)))\n print(\"Number of non car samples: {0}\".format(len(notcars)))\n img = cv2.imread(cars[0])\n print(\"Image shape: {0}x{1}\".format(img.shape[0], img.shape[1]))\n print(\"Image datatype: {}\".format(img.dtype))", "def __repr__(self):\n lstout = [\"Azimuthal Integrator:\", self.ai.__repr__(),\n \"Input image shape: %s\" % list(self.shapeIn),\n \"Number of points in radial direction: %s\" % 
self.nbpt_rad,\n \"Number of points in azimuthal direction: %s\" % self.nbpt_azim,\n \"Unit in radial dimension: %s\" % self.unit.REPR,\n \"Correct for solid angle: %s\" % self.correct_solid_angle,\n \"Polarization factor: %s\" % self.polarization,\n \"Dark current image: %s\" % self.dark_current_image,\n \"Flat field image: %s\" % self.flat_field_image,\n \"Mask image: %s\" % self.mask_image,\n \"Dummy: %s,\\tDelta_Dummy: %s\" % (self.dummy, self.delta_dummy),\n \"Directory: %s, \\tExtension: %s\" % (self.subdir, self.extension)]\n return os.linesep.join(lstout)", "def write(self, image):\n raise NotImplementedError()", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def long_slice(image_path, out_name, outdir, slice_size, net):\n img = Image.open(image_path)\n imgout = Image.open(image_path)\n orw, orh = img.size\n width, height = img.size\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n orw, orh = imgout.size\n width, height = img.size\n print(img.size)\n r = 1\n draw = ImageDraw.Draw(imgout)\n\n flag_continue = True\n while flag_continue:\n if os.path.exists(\"./testsliceimage/list.txt\"):\n os.remove(\"./testsliceimage/list.txt\")\n file = open(\"./testsliceimage/list.txt\", \"w+\")\n for sliceh in range(slicesh*step):\n for slicew in range(slicesw*step):\n #set the bounding box! 
The important bit\n bbox = (int(slicew*slice_size/step), int(sliceh*slice_size/step), int(slicew*slice_size/step)+slice_size, int(sliceh*slice_size/step)+slice_size)\n working_slice = img.crop(bbox)\n\n working_slice.save(os.path.join(outdir, \"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\"))\n file.write(\"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\\n\")\n\n if sliceh == 16 and slicew == 27 and width == 450 :\n print (int(slicew*slice_size/step), int(sliceh*slice_size/step),int(slicew*slice_size/step)+slice_size,int(sliceh*slice_size/step)+slice_size)\n\n file.close()\n transform_test = tf.Compose([tf.Grayscale(), tf.ToTensor(), tf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n testset = UnknownDataset(\"./testsliceimage/\", \"./testsliceimage/list.txt\", transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=WORKERS)\n\n with torch.no_grad():\n N = 0\n for data in testloader:\n images, img_names = data['image'], data['image_name']\n outputs = net(images.float())\n _, predicted = torch.max(outputs.data, 1)\n # print(predicted)\n if max(predicted) == 1 :\n ite = -1\n for predic in predicted :\n ite += 1\n if predic == 1 and outputs[ite][1]-outputs[ite][0] > CONFIDENCE:\n print(img_names[ite])\n # print(outputs)\n N += 1\n #dessiner carre sur image\n slh = int(img_names[ite].split('_')[4])\n slw = int(img_names[ite].split('_')[5][:-4])\n x1 = int(slh * slice_size / step)\n x2 = x1 + slice_size\n y1 = int(slw * slice_size / step)\n y2 = y1 + slice_size\n\n if slh == 16 and slw == 27 and width ==450 :\n print (x1, y1, x2, y2)\n\n print(r)\n rh = orh / height\n rw = orw / width\n x1 = x1 * rh\n x2 = x2 * rh\n y1 = y1 * rw\n y2 = y2 * rw\n\n draw.rectangle(((y1, x1), (y2, x2)), outline=\"red\")\n # draw.text((y2,x2), img_names[0])\n copyfile(\"./testsliceimage/\"+img_names[ite], \"./goodimage/\"+ img_names[ite])\n\n if width <= 200 or height <= 200:\n flag_continue = False\n else:\n r = r * scale\n width, height = int(width/scale), int(height/scale)\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n width, height = img.size\n\n # imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout.save(\"./rectangle/out\", \"PNG\")", "def array_to_imagefile(data, imagefname,verbose=False):\n if data.ndim == 2:\n data = np.dstack([data,data,data])\n data = np.rollaxis(data,-1)\n # print(data.shape)\n img = Image.fromarray(np.uint8(np.rollaxis(np.rollaxis(data,-1),-1)))\n if data.ndim == 2:\n if data.shape[3] == 3:\n img = img.convert(mode=\"RGB\")\n img.mode='RGB'\n if data.shape[3] == 4:\n img = img.convert(mode=\"RGBA\")\n img.mode='RGBA'\n \n \n if verbose:\n print(\"saving \", os.path.realpath(imagefname))\n img.save(imagefname)\n return 1", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def dump(self):\n res = []\n #res.append(\"Submeshes: %d\" % len(self.submeshes))\n #res.append(\"IdxBuf: 0x%04X bytes\" % 
len(self.idx_buf))\n #res.append(\"PrimFmt: 0x%04X (%s)\" % (\n # self.prim_fmt_id, self.prim_fmt))\n #res.append(\"IdxType: 0x%02X (%s)\" % (\n # self.header['idx_type'], self.idx_fmt,\n #))\n #res.append(\"IdxCnt: %d\" % self.header['idx_cnt'])\n #res.append(\"VisGrp: %d\" % self.header['visibility_group'])\n #res.append(\"Unknown: 0x%08X 0x%08X 0x%08X\" % (\n # self.header['unk08'],\n # self.header['unk10'],\n # self.header['unk34'],\n #))\n #return '\\n'.join(res).replace('\\n', '\\n ')\n\n return \"%4d│%04X│%04X %-24s│%02X %s│%5d│%5d│%08X│%08X│%08X\" %(\n len(self.submeshes),\n len(self.idx_buf),\n self.prim_fmt_id, self.prim_fmt,\n self.header['idx_type'], self.idx_fmt,\n self.header['idx_cnt'],\n self.header['visibility_group'],\n self.header['unk08'], self.header['unk10'],\n self.header['unk34'],\n )" ]
[ "0.7314874", "0.72975045", "0.7103524", "0.7012286", "0.6931774", "0.67332554", "0.6709795", "0.6601998", "0.6534724", "0.6502416", "0.6426161", "0.6281741", "0.6222216", "0.6221346", "0.6040175", "0.60378903", "0.59890413", "0.594744", "0.5930997", "0.5928868", "0.5903941", "0.59003437", "0.5883875", "0.5843126", "0.5829642", "0.5817241", "0.57438475", "0.5733415", "0.5705191", "0.56490237", "0.564471", "0.564133", "0.560874", "0.55419356", "0.55270183", "0.5504326", "0.55023485", "0.5468386", "0.54592705", "0.54488075", "0.5439342", "0.5439342", "0.53984785", "0.53956", "0.538721", "0.53816783", "0.53741443", "0.5359987", "0.53278923", "0.53060055", "0.53002876", "0.5285969", "0.5282924", "0.52804214", "0.5276673", "0.5263416", "0.52457637", "0.5238334", "0.5236043", "0.52322173", "0.52292687", "0.52229244", "0.5214367", "0.52129495", "0.51873314", "0.5186461", "0.5186243", "0.5185942", "0.51848996", "0.51743615", "0.5167385", "0.51602036", "0.51575047", "0.5149333", "0.5147734", "0.5139543", "0.51365316", "0.51330566", "0.51330537", "0.5131669", "0.5131621", "0.5126532", "0.5125787", "0.51157516", "0.51150376", "0.5108551", "0.51084226", "0.51015747", "0.51001865", "0.5097678", "0.50927705", "0.50916517", "0.50817674", "0.5073145", "0.5073145", "0.5072777", "0.5060324", "0.5054868", "0.5054794", "0.5050084" ]
0.7488905
0
Print the data in slice iz of an image to standard out in a format that agrees with v2
def print_image_slice_3d(input, num=0,direction="z"):
	#print "print slice at 3 directions"
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	if(direction=="x"):
		#print "xxxxx"
		ix=num
		print "(x = %d slice)" % (ix)
		line = []
		for iz in xrange(nz-1,-1,-1):
			line.append("Z ")
			line.append("%4i " % iz)
			for iy in xrange(ny):
				line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
				if ((iy + 1) % 5 == 0):
					line.append("\n ")
					line.append(" ")
			line.append("\n")
			if(ny%5 != 0): line.append("\n")
		print "".join(line)
	elif(direction=="y"):
		#print "yyy"
		iy=num
		print "(y = %d slice)" % (iy)
		line = []
		for iz in xrange(nz-1,-1,-1):
			line.append("Z ")
			line.append("%4i " % iz)
			for ix in xrange(nx):
				line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
				if ((ix + 1) % 5 == 0):
					line.append("\n ")
					line.append(" ")
			line.append("\n")
			if(nx%5 != 0): line.append("\n")
		print "".join(line)
	else:
		#print "zzzz"
		iz=num
		print "(z = %d slice)" % (iz)
		line = []
		for iy in xrange(ny-1,-1,-1):
			line.append("Row ")
			line.append("%4i " % iy)
			for ix in xrange(nx):
				line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
				if ((ix + 1) % 5 == 0):
					line.append("\n ")
					line.append(" ")
			line.append("\n")
			if(nx%5 != 0): line.append("\n")
		print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d 
col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n 
plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def printImage(imageObject):\n # TODO\n pass", "def __writeImageBytes(self, image):\n\n if not image:\n raise Exception(\"image not found\")\n result = []\n for i, b in enumerate(image):\n if i % 39 == 0:\n result.append(\"\\n\")\n result.append(f\"{b:02X}\")\n return \"\".join(result)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, 
nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def _part_generate_repr(img, steps, params, featsel, filt_l, legacy=True, debug=False):\n\n assert img.ndim == 2 or img.ndim == 3, \"must be gray or RGB\"\n\n if 'preproc_resize' in steps:\n img = _preproc_resize(img, params['preproc']['max_edge'], legacy)\n orig_imga = img.copy()\n if debug:\n print(\"orig_imga, mean {}, std {}\".format(orig_imga.mean(), orig_imga.std()))\n # convert image into gray scale, 2 dim array.\n response = img\n # this is the established way to deal with images for legacy behavior.\n if legacy:\n response = img.astype(np.float32) / 255.0\n\n if response.ndim == 3:\n response = 0.2989 * response[:, :, 0] + 0.5870 * response[:, :, 1] + 0.1140 * response[:, :, 2]\n\n assert response.ndim == 2, \"must be two channels!\"\n # in case we still have float64 dtype, in the case of non legacy\n response = response.astype(np.float32, copy=True)\n assert response.dtype == np.float32\n if debug:\n print(\"imga0, mean {}, std {}\".format(response.mean(), response.std()))\n\n if 'preproc_lowpass' in steps:\n response = _preproc_lowpass(response, params['preproc']['lsum_ksize'], legacy)\n imga0 = response.copy()\n if debug:\n print(\"imga0 normalized, mean {}, std {}\".format(imga0.mean(), imga0.std()))\n\n if 'normin' in steps:\n response = _normin(response, params['normin'], legacy)\n imga1 = response.copy()\n if debug:\n print(\"imga1, shape{}, mean {}, std {}\".format(imga1.shape, imga1.mean(), imga1.std()))\n\n if 'filter' in steps:\n response = _filter(response, filt_l, legacy, mode=params['filter']['mode'])\n else:\n response = response[:, :, np.newaxis]\n # make sure it's 3d.\n assert response.ndim == 3, \"must have a 3d response array\"\n imga2 = response.copy()\n if debug:\n print(\"imga2, shape {}, mean {}, std {}\".format(imga2.shape, imga2.mean(), imga2.std()))\n\n if 'activ' in steps:\n # 'type': 'clamp', # can also be `square`, `exp`, `recsquare`, `rec`\n response = _activ(response, params['activ'])\n imga3 = response.copy()\n if debug:\n print(\"imga3, shape {}, mean {}, std {}\".format(imga3.shape, imga3.mean(), imga3.std()))\n\n if 'normout' in steps:\n response = 
_normin(response, params['normout'], legacy)\n imga4 = response.copy()\n if debug:\n print(\"imga4, shape {}, mean {}, std {}\".format(imga4.shape, imga4.mean(), imga4.std()))\n\n if 'dimr' in steps:\n response = _dimr(response, params['dimr']['lsum_ksize'], params['dimr']['outshape'], legacy)\n if debug:\n print(\"output, shape {}, mean {}, std {}\".format(response.shape, response.mean(), response.std()))\n images = {'imga0': imga0,\n 'imga1': imga1,\n 'imga2': imga2,\n 'imga3': imga3,\n 'imga4': imga4,\n 'orig_imga': orig_imga}\n\n # handle additional features.\n # pure legacy functions.\n fvector = handle_feature_selection(response, images, featsel)\n return fvector", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def debug_image(self, state_index: int = -1):\n image = self.make_image(state_index, channel_type=\"n\")\n return np.array([np.sum(arr) for arr in image])[3:].reshape(8, 12)", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", 
self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def to_nii(self, outbase, spirec='spirec', saveInOut=False):\n if self.image_data is None:\n self.recon(spirec)\n\n image_tlhc = np.array([self.header.image.tlhc_R, self.header.image.tlhc_A, self.header.image.tlhc_S])\n image_trhc = np.array([self.header.image.trhc_R, self.header.image.trhc_A, self.header.image.trhc_S])\n image_brhc = np.array([self.header.image.brhc_R, self.header.image.brhc_A, self.header.image.brhc_S])\n #image_cent = np.array([self.header.image.ctr_R, self.header.image.ctr_A, self.header.image.ctr_S])\n\n row_vec = (image_trhc-image_tlhc)/np.sqrt(np.dot(image_trhc-image_tlhc, image_trhc-image_tlhc))\n col_vec = -(image_trhc-image_brhc)/np.sqrt(np.dot(image_trhc-image_brhc, image_trhc-image_brhc))\n # The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll\n # need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them\n # such that row_vec points to the right and col_vec points up.\n # Not sure if we need to negate the slice_norm. From the NIFTI-1 header:\n # The third column of R will be either the cross-product of the first 2 columns or\n # its negative. It is possible to infer the sign of the 3rd column by examining\n # the coordinates in DICOM attribute (0020,0032) \"Image Position (Patient)\" for\n # successive slices. However, this method occasionally fails for reasons that I\n # (RW Cox) do not understand.\n\n # can also get slice_norm from: slice_norm = np.cross(row_vec, col_vec)\n slice_norm = np.array([self.header.image.norm_R, self.header.image.norm_A, self.header.image.norm_S])\n slice_fov = np.abs(self.header.series.start_loc - self.header.series.end_loc)\n\n # This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?\n # And is it related to wheather I have to negate the slice_norm?\n # Tuned this empirically by comparing spiral and EPI data with the sam Rx.\n # Everything seems reasonable, except the test for axial orientation (start_ras==S|I).\n # I have no idea why I need that! But the flipping only seems necessary for axials, not\n # coronals or the few obliques I've tested.\n # FIXME: haven't tested sagittals! 
(to test for spiral: 'sprt' in self.psd_name.lower())\n if (self.header.series.start_ras=='S' or self.header.series.start_ras=='I') and self.header.series.start_loc > self.header.series.end_loc:\n pos = image_tlhc - slice_norm*slice_fov\n # FIXME: since we are reversing the slice order here, should we change the slice_order field below?\n self.image_data = self.image_data[:,:,::-1,]\n if self.fm_data is not None:\n self.fm_data = self.fm_data[:,:,::-1,]\n else:\n pos = image_tlhc\n\n if self.num_bands > 1:\n pos = pos - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0\n\n qto_xyz = np.zeros((4,4))\n qto_xyz[0,0] = row_vec[0]\n qto_xyz[0,1] = col_vec[0]\n qto_xyz[0,2] = slice_norm[0]\n\n qto_xyz[1,0] = row_vec[1]\n qto_xyz[1,1] = col_vec[1]\n qto_xyz[1,2] = slice_norm[1]\n\n qto_xyz[2,0] = row_vec[2]\n qto_xyz[2,1] = col_vec[2]\n qto_xyz[2,2] = slice_norm[2]\n\n qto_xyz[:,3] = np.append(pos, 1).T\n qto_xyz[0:3,0:3] = np.dot(qto_xyz[0:3,0:3], np.diag(self.mm_per_vox))\n\n nii_header = nibabel.Nifti1Header()\n nii_header.set_xyzt_units('mm', 'sec')\n nii_header.set_qform(qto_xyz, 'scanner')\n nii_header.set_sform(qto_xyz, 'scanner')\n\n nii_header['slice_start'] = 0\n nii_header['slice_end'] = self.num_slices - 1\n # nifti slice order codes: 0 = unknown, 1 = sequential incrementing, 2 = seq. dec., 3 = alternating inc., 4 = alt. dec.\n slice_order = 0\n nii_header['slice_duration'] = self.tr * 1000 / self.num_slices\n # FIXME: check that this is correct.\n if self.header.series.se_sortorder == 0:\n slice_order = 1 # or 2?\n elif self.header.series.se_sortorder == 1:\n slice_order = 3 # or 4?\n nii_header['slice_code'] = slice_order\n\n # Note: the freq/phase dir isn't meaningful for spiral trajectories.\n if self.header.image.freq_dir==1:\n nii_header.set_dim_info(freq=1, phase=0, slice=2)\n else:\n nii_header.set_dim_info(freq=0, phase=1, slice=2)\n\n # FIXME: There must be a cleaner way to set the TR! 
Maybe bug Matthew about it.\n nii_header.structarr['pixdim'][4] = self.tr\n nii_header.set_slice_duration(nii_header.structarr['pixdim'][4] / self.num_slices)\n nii_header.structarr['cal_max'] = self.image_data.max()\n nii_header.structarr['cal_min'] = self.image_data.min()\n\n if self.num_echoes == 1:\n nifti = nibabel.Nifti1Image(self.image_data, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n elif self.num_echoes == 2:\n if saveInOut:\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,0], None, nii_header)\n nibabel.save(nifti, outbase + '_in.nii.gz')\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,1], None, nii_header)\n nibabel.save(nifti, outbase + '_out.nii.gz')\n # FIXME: Do a more robust test for spiralio!\n # Assume spiralio, so do a weighted average of the two echos.\n # FIXME: should do a quick motion correction here\n w_in = np.mean(self.image_data[:,:,:,:,0], 3)\n w_out = np.mean(self.image_data[:,:,:,:,1], 3)\n inout_sum = w_in + w_out\n w_in = w_in / inout_sum\n w_out = w_out / inout_sum\n avg = np.zeros(self.image_data.shape[0:4])\n for tp in range(self.image_data.shape[3]):\n avg[:,:,:,tp] = w_in*self.image_data[:,:,:,tp,0] + w_out*self.image_data[:,:,:,tp,1]\n nifti = nibabel.Nifti1Image(avg, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n else:\n for echo in range(self.num_echoes):\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,echo], None, nii_header)\n nibabel.save(nifti, outbase + '_echo%02d.nii.gz' % echo)\n\n if self.fm_data is not None:\n nii_header.structarr['cal_max'] = self.fm_data.max()\n nii_header.structarr['cal_min'] = self.fm_data.min()\n nifti = nibabel.Nifti1Image(self.fm_data, None, nii_header)\n nibabel.save(nifti, outbase + '_B0.nii.gz')", "def create_displayable_test_output(test_image):\n if hasattr(test_image, \"numpy\"):\n return np.squeeze(test_image.numpy())[:, :, 1:]\n else:\n return np.squeeze(test_image)[:, :, 1:]", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def print_bitmap(self, w, h, image):\n\n bitmap = self._pack_bitmap(w, h, image)\n\n row_bytes = (w + 7) // 8 # Round up to next byte boundary\n\n if row_bytes >= 48:\n row_bytes_clipped = 48\n else:\n row_bytes_clipped = row_bytes # 384 pixels max width\n\n # Est. 
max rows to write at once, assuming 256 byte printer buffer.\n if self._dtr_enabled:\n chunk_height_limit = 255 # Buffer doesn't matter, handshake!\n else:\n chunk_height_limit = 256 // row_bytes_clipped\n if chunk_height_limit > self._max_chunk_height:\n chunk_height_limit = self._max_chunk_height\n elif chunk_height_limit < 1:\n chunk_height_limit = 1\n\n row_start = 0\n i = 0\n while row_start < h:\n # Issue up to chunkHeightLimit rows at a time:\n chunk_height = h - row_start\n if chunk_height > chunk_height_limit:\n chunk_height = chunk_height_limit\n\n self.write(self.ASCII_DC2, '*', chunk_height, row_bytes_clipped)\n\n y = 0\n while y < chunk_height:\n x = 0\n while x < row_bytes_clipped:\n self.timeout_wait()\n self._send_to_printer(int(bitmap[i]))\n x += 1\n i += 1\n\n y += 1\n\n i += row_bytes - row_bytes_clipped\n\n self.timeout_set(chunk_height * self._dot_print_time)\n\n row_start += chunk_height_limit\n\n self._prev_byte = '\\n'", "def long_slice(image_data):\n\n\t# Process binary data and open.\n\tim = image_data.split('base64,')[1]\n\tim = base64.b64decode(im)\n\tim = io.BytesIO(im)\n\t\n\timg = Image.open(im)\n\twidth, height = img.size\n\tupper = 0\n\tleft = 0\n\t\n\t# Max height to fit pdf.\n\tmax_height_mm = 198\n\tmax_height = (max_height_mm * 96) / 25.4\n\n\tslice_size = max_height\n\n\tslices = int(math.ceil(height/slice_size))\n\tcount = 1\n\n\tfinal_slices = []\n\tfor slice in range(slices):\n\t\t# If no more slices needed, set the lower bound to bottom of image.\n\t\tif count == slices:\n\t\t\tlower = height\n\t\telse:\n\t\t\tlower = int(count * slice_size) \n\t\t \n\t\t# Set the bounding box. \n\t\tbbox = (left, upper, width, lower)\n\t\tworking_slice = img.crop(bbox)\n\n\t\t# Save png as bytes object.\n\t\tbyte_io = io.BytesIO()\n\t\tworking_slice.save(byte_io, 'png')\n\t\t\n\t\t# Convert bytes object to base64 string and save to list.\n\t\timg_str = base64.b64encode(byte_io.getvalue())\n\t\timg_str = 'data:image/png;base64,' + img_str.decode()\n\t\tfinal_slices.append(img_str)\n\n\t\tupper = upper + slice_size\n\t\tcount = count + 1\n\n\treturn final_slices", "def muestraPokemon(bytes):\n image = Image.open(io.BytesIO(bytes))\n data = np.array(image)\n plt.imshow(data)\n plt.axis('off')\n plt.show()", "def convertData(img):\n dataset = []\n for i in img:\n dataset.append(format(ord(i), '08b'))\n return dataset", "def img_to_ascii(**kwargs):\n ascii_chars = [ u'Z', u'Q', u'T', u'W', u'E', u'K', u'P', u'L', u'I', u'C', u'Y']\n \n width = kwargs.get('width',200)\n path = kwargs.get('path',None)\n\n\n\n im = Image.open(path)\n\n im = resize(im,width)\n\n # w,h = im.size\n\n # this is used as storage. It stores the original picture's color values\n objToGo = list(im.convert(\"RGBA\").getdata())\n\n im = im.convert(\"L\") # convert to grayscale\n\n imlist = list(im.getdata())\n\n i = 0\n j = 0\n # chList is the characters that will be printed. 
It is a 2D array\n chList = []\n chList.append([])\n for val in imlist:\n ch = ascii_chars[val // 25] #.decode('utf-8')\n chList[j].append(ch)\n sys.stdout.write(ch)\n i += 1\n if i % width == 0:\n sys.stdout.write(\"\\n\")\n chList.append([])\n j += 1\n i = 0\n\n return chList,objToGo", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text", "def get_itk_data(path_or_image, verbose=False):\n\n if isinstance(path_or_image, str):\n image = get_itk_image(path_or_image)\n else:\n image = path_or_image\n\n arr = itk.GetArrayFromImage(image)\n shape = arr.shape\n spacing = image.GetSpacing()[::-1]\n data_type = arr.dtype\n\n if verbose:\n print '\\t image shape: ' + str(shape)\n print '\\t image spacing: ' + str(spacing)\n print '\\t image data type: ' + str(data_type)\n\n return arr, shape, spacing", "def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)", "def __str__(self):\n out = ' '\n for j in range(self.width):\n out += str(j + 1) + ' '\n out += '\\n'\n for i in range(self.height):\n out += '\\n'\n out += str(i + 1) + ' '\n for j in range(self.width):\n out += self.data[i][j] + ' '\n return out", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def get_image_summary(img, idx=0):\n\n V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))\n V -= tf.reduce_min(V)\n V /= tf.reduce_max(V)\n V *= 255\n\n img_w = tf.shape(img)[1]\n img_h = tf.shape(img)[2]\n V = tf.reshape(V, tf.stack((img_w, img_h, 1)))\n V = tf.transpose(V, (2, 0, 1))\n V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))\n return V", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def print_data(self, data):\n self.print_indicator = True\n self.imagedata = data\n self.setImage(self.imagedata)\n\n self.indicator_min = -200\n self.indicator_max = 200\n\n if self.video_model != None:\n pos = int(self.video_model.get_pos(datatype = \"motion\"))\n self.indicator = self.view.plot([pos,pos],[self.indicator_min,self.indicator_max],pen=pyqtgraph.mkPen(color=pyqtgraph.hsvColor(2),width=1))", "def printFactoryImageStruct(processedImagePath, trailerSize, numLinesImageContent, descripFixedSize):\n\n cuttingLine = \"--------\"\n subCuttingLine = 
\"-----\"\n\n # print magic code\n with open(processedImagePath, \"rb\") as f:\n print(cuttingLine + subCuttingLine + \"Magic Code\" + subCuttingLine + cuttingLine)\n byte = f.read(8)\n print(byte)\n\n # print [ota_descriptor + image content]\n printOTADescriptorImageStruct(processedImagePath, numLinesImageContent, 8)\n\n # print trailer\n fSize = getFileSize(processedImagePath)\n f = open(processedImagePath, \"rb\")\n f.seek(fSize - trailerSize)\n\n print(cuttingLine + cuttingLine + \"Trailer\" + cuttingLine + cuttingLine)\n\n try:\n print(subCuttingLine + \" signature type \" + subCuttingLine)\n byte = f.read(descripFixedSize)\n print(byte)\n\n print(subCuttingLine + \" signature size \" + subCuttingLine)\n byte = f.read(4)\n print(format32BitHexStr(hex(struct.unpack('<I', byte)[0])))\n\n byte = f.read()\n print(subCuttingLine + \" signature \" + subCuttingLine)\n print(byte)\n\n finally:\n f.close()", "def print_image_info(self):\r\n\r\n maxt = np.max(self.times)\r\n\r\n print (\" Duration of Image Stack: %9.3f s (%8.3f min) period = %8.3f s\" % (maxt, maxt/60.0, self.period))\r\n\r\n print (' Image shape: ', self.imageData.shape)\r\n\r\n print (' nFrames: %d framerate: %9.3f\\n' % (self.nFrames, self.framerate))", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\tprint(data_chunk.shape)\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def showTensorImg(ts, title):\n img = np.transpose(ts, (1, 2, 0))\n showImg(img, title)\n return", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def get_image():\r\n\t\treturn \"\\n\".join(\"\".join(row) for row in picture)", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram = \"\"\n\n if size_in_bytes < 100 * 1024 * 1024:\n if not 
labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def array2ipl(img): \n img_new = cv.CreateImageHeader((img.shape[1], img.shape[0]), cv.IPL_DEPTH_8U, 3)\n cv.SetData(img_new, img.copy().data,img.dtype.itemsize*3*img.shape[1])\n img_new[50,75]\n return img_new", "def pildumps(image, format=\"PNG\"):\n result = StringIO.StringIO()\n if image.dtype in [np.dtype('f'), np.dtype('d')]:\n assert np.amin(image) > -0.001 and np.amax(image) < 1.001\n image = np.clip(image, 0.0, 1.0)\n image = np.array(image * 255.0, 'uint8')\n PIL.Image.fromarray(image).save(result, format=format)\n return result.getvalue()", "def picture_binary(runtime_addr, n=1):\n\n set_formatter(runtime_addr, n, mainformatter.picture_binary_formatter)", "def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()", "def DebugFormat(self):\n print FormatAsBits((self.output, self.out_boff))\n for i in xrange(self.idx_byte*8 + self.idx_boff - 1):\n if not i % 8:\n sys.stdout.write(\"|\")\n sys.stdout.write(\"-\")\n print \"^\"", "def print_image_info(image, resize=rsz_default, kernel=kernel_size):\n\tprint \"Image Size: {0}\".format(image.shape)\n\tprint \"Image Max: {0}\".format(image.max())\n\tprint \"Image Min: {0}\".format(image.min())\n\tprint \"Image Mean: {0}\".format(image.mean())\n\tprint \"Image dtype: {0}\\n\".format(image.dtype)\n\timage = to_uint8(image)\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\tsecond_m = ['m20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03']\n\tprint \"Zero Order Moment: {0}\".format(M['m00'])\n\tprint \"First Order Moments: {0}, {1}\".format(M['m10'], M['m01'])\n\tprint \"Second Order Moments:\"\n\tsecond_m_str = ''\n\tfor m2 in second_m:\n\t\tsecond_m_str += \"{0},\".format(M[m2])\n\tprint second_m_str[:-1]", "def observation(self, img):\r\n img = img.transpose(1, 2, 0)\r\n return img", "def printData (data):\n print(str(len(data)) + '\\t' + str(data))", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def showBPImg(pV,nV):\n # object arrays of the positive and 
negative images\n inv_crop = np.empty(8, dtype=object)\n inv_crop2 = np.empty(8, dtype=object)\n for t in range(8):\n # backprojection functions\n inverse = retina.inverse(pV[:,t,:],x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True)\n inv_crop[t] = retina.crop(inverse,x,y,dloc[i])\n inverse2 = retina.inverse(nV[:,t,:],x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True)\n inv_crop2[t] = retina.crop(inverse2,x,y,dloc[i])\n # place descriptions\n cv2.putText(inv_crop[t],types[t] + \" + \",(xx,yy), font, 1,(0,255,255),2)\n cv2.putText(inv_crop2[t],types[t] + \" - \",(xx,yy), font, 1,(0,255,255),2)\n # stack all images into a grid\n posRG = np.vstack((inv_crop[:4]))\n negRG = np.vstack((inv_crop2[:4]))\n posYB = np.vstack((inv_crop[4:]))\n negYB = np.vstack((inv_crop2[4:]))\n merge = np.concatenate((posRG,negRG,posYB,negYB),axis=1)\n return merge", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def info(self):\n\n\t\tprint(\"Pixels on a side: {0}\".format(self.data.shape[0]))\n\t\tprint(\"Pixel size: {0}\".format(self.resolution))\n\t\tprint(\"Total angular size: {0}\".format(self.side_angle))\n\t\tprint(\"lmin={0:.1e} ; lmax={1:.1e}\".format(self.lmin,self.lmax))", "def print_wrapped(data, ncols=3):\r\n nrows = len(data)\r\n labels = data.index\r\n n_split_rows = int(np.ceil(nrows / ncols))\r\n for r in range(0, nrows, ncols):\r\n for c in range(ncols):\r\n try:\r\n numstr = '{}'.format(data[r + c])\r\n tabs = [' '] * (20 - len(labels[r + c]) - len(numstr))\r\n print(labels[r + c] + \"\".join(tabs) + numstr, end='\\t')\r\n except:\r\n pass\r\n print()", "def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )", "def img_to_slices(img):\n res = []\n\n for i, slice_img in enumerate(img):\n res.append(slice_img)\n return res", "def pngxy(data):\n ihdr = data.index(b'IHDR')\n # next 8 bytes are width/height\n w4h4 = data[ihdr+4:ihdr+12]\n return struct.unpack('>ii', w4h4)", "def print_stats(cars, notcars):\n print(\"Number of car samples: {0}\".format(len(cars)))\n print(\"Number of non car samples: {0}\".format(len(notcars)))\n img = cv2.imread(cars[0])\n print(\"Image shape: {0}x{1}\".format(img.shape[0], img.shape[1]))\n print(\"Image datatype: {}\".format(img.dtype))", "def __repr__(self):\n lstout = [\"Azimuthal Integrator:\", self.ai.__repr__(),\n \"Input image shape: %s\" % list(self.shapeIn),\n \"Number of points in radial direction: %s\" % self.nbpt_rad,\n \"Number of points in 
azimuthal direction: %s\" % self.nbpt_azim,\n \"Unit in radial dimension: %s\" % self.unit.REPR,\n \"Correct for solid angle: %s\" % self.correct_solid_angle,\n \"Polarization factor: %s\" % self.polarization,\n \"Dark current image: %s\" % self.dark_current_image,\n \"Flat field image: %s\" % self.flat_field_image,\n \"Mask image: %s\" % self.mask_image,\n \"Dummy: %s,\\tDelta_Dummy: %s\" % (self.dummy, self.delta_dummy),\n \"Directory: %s, \\tExtension: %s\" % (self.subdir, self.extension)]\n return os.linesep.join(lstout)", "def write(self, image):\n raise NotImplementedError()", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def long_slice(image_path, out_name, outdir, slice_size, net):\n img = Image.open(image_path)\n imgout = Image.open(image_path)\n orw, orh = img.size\n width, height = img.size\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n orw, orh = imgout.size\n width, height = img.size\n print(img.size)\n r = 1\n draw = ImageDraw.Draw(imgout)\n\n flag_continue = True\n while flag_continue:\n if os.path.exists(\"./testsliceimage/list.txt\"):\n os.remove(\"./testsliceimage/list.txt\")\n file = open(\"./testsliceimage/list.txt\", \"w+\")\n for sliceh in range(slicesh*step):\n for slicew in range(slicesw*step):\n #set the bounding box! 
The important bit\n bbox = (int(slicew*slice_size/step), int(sliceh*slice_size/step), int(slicew*slice_size/step)+slice_size, int(sliceh*slice_size/step)+slice_size)\n working_slice = img.crop(bbox)\n\n working_slice.save(os.path.join(outdir, \"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\"))\n file.write(\"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\\n\")\n\n if sliceh == 16 and slicew == 27 and width == 450 :\n print (int(slicew*slice_size/step), int(sliceh*slice_size/step),int(slicew*slice_size/step)+slice_size,int(sliceh*slice_size/step)+slice_size)\n\n file.close()\n transform_test = tf.Compose([tf.Grayscale(), tf.ToTensor(), tf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n testset = UnknownDataset(\"./testsliceimage/\", \"./testsliceimage/list.txt\", transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=WORKERS)\n\n with torch.no_grad():\n N = 0\n for data in testloader:\n images, img_names = data['image'], data['image_name']\n outputs = net(images.float())\n _, predicted = torch.max(outputs.data, 1)\n # print(predicted)\n if max(predicted) == 1 :\n ite = -1\n for predic in predicted :\n ite += 1\n if predic == 1 and outputs[ite][1]-outputs[ite][0] > CONFIDENCE:\n print(img_names[ite])\n # print(outputs)\n N += 1\n #dessiner carre sur image\n slh = int(img_names[ite].split('_')[4])\n slw = int(img_names[ite].split('_')[5][:-4])\n x1 = int(slh * slice_size / step)\n x2 = x1 + slice_size\n y1 = int(slw * slice_size / step)\n y2 = y1 + slice_size\n\n if slh == 16 and slw == 27 and width ==450 :\n print (x1, y1, x2, y2)\n\n print(r)\n rh = orh / height\n rw = orw / width\n x1 = x1 * rh\n x2 = x2 * rh\n y1 = y1 * rw\n y2 = y2 * rw\n\n draw.rectangle(((y1, x1), (y2, x2)), outline=\"red\")\n # draw.text((y2,x2), img_names[0])\n copyfile(\"./testsliceimage/\"+img_names[ite], \"./goodimage/\"+ img_names[ite])\n\n if width <= 200 or height <= 200:\n flag_continue = False\n else:\n r = r * scale\n width, height = int(width/scale), int(height/scale)\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n width, height = img.size\n\n # imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout.save(\"./rectangle/out\", \"PNG\")", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def array_to_imagefile(data, imagefname,verbose=False):\n if data.ndim == 2:\n data = np.dstack([data,data,data])\n data = np.rollaxis(data,-1)\n # print(data.shape)\n img = Image.fromarray(np.uint8(np.rollaxis(np.rollaxis(data,-1),-1)))\n if data.ndim == 2:\n if data.shape[3] == 3:\n img = img.convert(mode=\"RGB\")\n img.mode='RGB'\n if data.shape[3] == 4:\n img = img.convert(mode=\"RGBA\")\n img.mode='RGBA'\n \n \n if verbose:\n print(\"saving \", os.path.realpath(imagefname))\n img.save(imagefname)\n return 1", "def dump(self):\n res = []\n #res.append(\"Submeshes: %d\" % len(self.submeshes))\n #res.append(\"IdxBuf: 0x%04X bytes\" % 
len(self.idx_buf))\n #res.append(\"PrimFmt: 0x%04X (%s)\" % (\n # self.prim_fmt_id, self.prim_fmt))\n #res.append(\"IdxType: 0x%02X (%s)\" % (\n # self.header['idx_type'], self.idx_fmt,\n #))\n #res.append(\"IdxCnt: %d\" % self.header['idx_cnt'])\n #res.append(\"VisGrp: %d\" % self.header['visibility_group'])\n #res.append(\"Unknown: 0x%08X 0x%08X 0x%08X\" % (\n # self.header['unk08'],\n # self.header['unk10'],\n # self.header['unk34'],\n #))\n #return '\\n'.join(res).replace('\\n', '\\n ')\n\n return \"%4d│%04X│%04X %-24s│%02X %s│%5d│%5d│%08X│%08X│%08X\" %(\n len(self.submeshes),\n len(self.idx_buf),\n self.prim_fmt_id, self.prim_fmt,\n self.header['idx_type'], self.idx_fmt,\n self.header['idx_cnt'],\n self.header['visibility_group'],\n self.header['unk08'], self.header['unk10'],\n self.header['unk34'],\n )" ]
[ "0.7489223", "0.7313441", "0.72980666", "0.7102297", "0.70125145", "0.6931557", "0.6733379", "0.67101216", "0.6534789", "0.65026045", "0.6426267", "0.62820506", "0.6222731", "0.6221045", "0.6040338", "0.6038878", "0.59879136", "0.594753", "0.593031", "0.59290165", "0.5902861", "0.5901005", "0.58831966", "0.5843231", "0.5829365", "0.5817032", "0.57431644", "0.5734932", "0.5705508", "0.56489563", "0.56461674", "0.5642296", "0.5609125", "0.554101", "0.5526995", "0.55046344", "0.5500855", "0.5467998", "0.54609543", "0.5448183", "0.5437958", "0.5437958", "0.53989595", "0.53944707", "0.538913", "0.538165", "0.5374208", "0.53598195", "0.53281003", "0.5306479", "0.52997947", "0.5284983", "0.5283382", "0.5280487", "0.5276586", "0.5262465", "0.5246497", "0.5238559", "0.52353954", "0.5234481", "0.52294546", "0.5223248", "0.5213322", "0.5213182", "0.51871854", "0.51869494", "0.51867616", "0.51859", "0.5184383", "0.51746356", "0.5166782", "0.51604223", "0.51564765", "0.5149928", "0.5147554", "0.5139443", "0.5137101", "0.5134098", "0.51331824", "0.51315427", "0.5130958", "0.51268965", "0.51256216", "0.5116639", "0.51144326", "0.51092196", "0.5107813", "0.51023126", "0.50983936", "0.5096376", "0.50933707", "0.5092595", "0.5081676", "0.50743556", "0.50743556", "0.50729823", "0.5059755", "0.50563335", "0.505438", "0.50506693" ]
0.6602657
8
Read a column-listed txt file.
def read_text_row(fnam, format="", skip=";"): from string import split inf = file(fnam, "r") strg = inf.readline() x = [] data = [] while (len(strg) > 0): com_line = False for j in xrange(len(strg)): if(strg[j] == skip): com_line = True if com_line == False: word=split(strg) if format == "s" : key = int(word[1]) if key != len(word) - 2: del word word = [] word.append(strg[0 : 5]) word.append(strg[6 : 7]) for k in xrange(key): k_start = 7 + k*13 k_stop = k_start + 13 word.append(strg[k_start : k_stop]) line=[] for i in xrange(len(word)): line.append(float(word[i])) data.append(line) strg=inf.readline() inf.close return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_text_file(file_name, ncol = 0):\n\t\n\tfrom string import split\n\tinf = file(file_name, \"r\")\n\tline = inf.readline()\n\tdata = []\n\twhile len(line) > 0:\n\t\tif ncol == -1:\n\t\t\tvdata = split(line)\n\t\t\tif data == []:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata.append([float(vdata[i])])\n\t\t\telse:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata[i].append(float(vdata[i]))\t\t\t\n\t\telse:\n\t\t\tvdata = float(split(line)[ncol])\n\t\t\tdata.append(vdata)\n\t\tline = inf.readline()\n\treturn data", "def readFile(fname):\n\n fromto = []\n cols = []\n with open(fname , 'r') as f:\n cols = f.readline().split(\",\")[0:4] # Headline\n for line in f:\n tm, frm, to, am = line.split(\",\")[0:4]\n frm = int(frm.lstrip())\n to = int(to.lstrip())\n fromto.append((frm,to))\n return cols, fromto", "def load_n_col(file):\n df = pd.read_csv(file, delimiter=\" \", header=None)\n columns = [list(df[col]) for col in df]\n return columns", "def readLines(filename, col=None):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n lines = [ s.rstrip(\"\\n\\r\") for s in lines ]\n if col == None:\n return lines\n else:\n return [ s.split(\"\\t\")[col] for s in lines ]", "def read_column(file_name, column_number):\n flist = []\n empty_lines = 0\n fread = open(file_name,'r')\n for line in fread:\n chompedLine = line.rstrip()\n if not chompedLine:\n empty_lines += 1\n continue\n flist.append(float(chompedLine.split()[column_number-1]))\n\n return flist", "def readOFColumnData(dataFile,nCol):\n fileCheck(dataFile) # does the file exists ? Stop if not.\n #\n # Init list\n data = []\n #\n for line in fileinput.input(dataFile):\n # remove parenthesis if any\n line = line.replace('(', '')\n line = line.replace(')', '') \n # divide each element of the line into words\n words = line.split()\n if words: # if there is a line in fact\n if words[0][0]!='#': #do something only if not comment \n data.append(float(words[nCol])) \n # \n return data", "def read_file(infile,column_num):\n\n \n column_list = []\n\n with open(infile,'r') as f:\n\n fl = f.readlines()\n\n for line in fl:\n \n \n value = int(line.split()[int(column_num)-1])\n column_list.append(value)\n\n\n return column_list", "def read_users(file_name):\n f = open(file_name, \"r\")\n header = f.readline()\n f.close()\n cols = [x.strip(\"\\\"\\n\") for x in header.split('\\t')]\n return cols[12:]", "def read_file_lines(filename, cols, skip=0, stop=-1, column_major=False, separator='[\\t ]'):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for line in [re.split(separator, l.strip()) for l in lines]]\n return np.transpose(res) if column_major else res", "def split_columns(file_path, separator=\"\\t\"):\n lines = fs.read_file(file_path)\n splitted = [ln.split(separator) for ln in lines]\n columns = zip(*splitted)\n\n return columns", "def read_slurm_file(filename, cols, skip=54, stop=-34, column_major=True):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:stop]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for 
line in [l.split() for l in lines]]\n return np.transpose(res) if column_major else res", "def read_file(filename):\n field = []\n with open(filename, encoding='utf-8') as f:\n f.readline()\n for line in f:\n field.append(line[3:].split())\n return field", "def read_file(file_name):\n\twith open(file_name, 'r') as file:\n\t\tdata = file.read().split('\\n')\n\t\n\treturn list(map(lambda x: x.split('\\t'), data))", "def get_column_from_file(file_name, column_number):\n\n file = open(file_name, 'r')\n column_values = []\n for line in file:\n row_values = [int(value.strip()) for value in line.split()]\n column_values.append(row_values[column_number])\n\n return column_values", "def readTab(file_name):\n data = []\n meta = []\n l=0\n for line in open(file_name):\n if l<3:\n meta.append(line.strip(\"\\n\").split(\"\\t\"))\n else:\n if len(line.strip(\"\\n\").split(\"\\t\")) == len(meta[0]):\n data.append(line.strip(\"\\n\").split(\"\\t\"))\n l += 1\n return (meta, data)", "def read_data_4_columns(filename=\"ripple_082-085.dat\"):\n # Process comment and header lines\n fileobj = open(filename, 'r')\n while True:\n s = fileobj.readline()\n if s.startswith('#'):\n print(s)\n continue\n elif s.startswith('h'):\n break\n else:\n print(\"Any comments (including an empty line) should start with #.\")\n print(\"Please fix your input file.\")\n sys.exit(1)\n \n # Go through data points \n h = []; k = []; q = []; F = []\n lines = fileobj.readlines()\n for line in lines:\n # This ignores an empty line\n line = line.rstrip()\n if not line: \n continue\n hval, kval, qval, Fval = line.split()\n h.append(int(hval))\n k.append(int(kval)) \n q.append(float(qval))\n F.append(float(Fval))\n return h, k, q, F", "def loadonecol(infile):\n slist = []\n with open(infile) as f:\n for line in f:\n line = line.strip()\n if line: # exclude blank line\n slist.append(line)\n return slist", "def read_file(self):\n colspecs = [[0, 7]] # for the id\n names = ['id']\n for question in self.question_list:\n colspecs.extend(question.get_column_range())\n names.extend(question.get_column_names())\n\n self.data = pd.read_fwf(self.file, colspecs=colspecs, encoding=self.encoding, names=names, header=None)\n self.data.fillna(0, inplace=True)\n self.data = self.data.astype(int)\n return self.data", "def read_data_6_columns(filename=\"ripple_082-085.dat\", skip=1):\n fileobj = open(filename, 'r')\n # ignore the first skip lines\n for i in range(skip):\n fileobj.readline()\n h = []; k = []; qr =[]; qz =[]; q = []; F = []\n lines = fileobj.readlines()\n for line in lines:\n hval, kval, rval, zval, qval, Fval = line.split()\n h.append(int(hval)) \n k.append(int(kval))\n qr.append(float(rval))\n qz.append(float(zval))\n q.append(float(qval))\n F.append(float(Fval)) \n return h, k, qr, qz, q, F", "def get_twisscolumns(tfsfile):\n cols = pd.read_csv(tfsfile, delim_whitespace=True, skiprows=range(46), nrows=2, index_col=None)\n return list(cols.columns[1:].values)", "def csv_style_file_reader(file_path, delimiter=None, column_title=True):\n the_file = open(file_path,'r')\n\n # This will be return\n column_titles = []\n\n #Get information from the first line\n first_line = the_file.readline()\n split_line = first_line.split(delimiter)\n nb_columns = len(split_line);\n\n # This will be return\n columns = [ [] for i in range(nb_columns) ]\n # Explaination: https://stackoverflow.com/questions/12791501/python-initializing-a-list-of-lists\n\n # Transfer the line in column titles\n if column_title:\n column_titles = split_line\n else:\n column_titles 
= [\"Column \"+ str(i) for i in range(nb_columns)];\n for i in range (nb_columns):\n columns[i].append( float(split_line[i]))\n\n for line in the_file:\n split_line = line.split(delimiter)\n if len(split_line) != 0:\n for i in range (nb_columns):\n columns[i].append( float(split_line[i]))\n\n return column_titles, columns", "def method4(fname):\n\t#jfrom cStringIO import StringIO\n\t#from tokenize import generate_tokens\n\timport re\n\tprint \"Method 4: read in files by line\"\n\tprint \"and rather than printing out all of it, only print out specific cols \"\n\tf = open(fname,\"r\")\n\tline = f.readline()\n\ti = 0 \n\t\n\twhile line != '':\n\t\ttmp= line.strip()\n\t\tif tmp :\n\t\t\t#print tmp\n\t\t\t#tmp = line.strip()\n\t\t\ttmpp = tmp.split()\n\t\t\t#i +=1\n\t\t\t#print len(tmpp)\n\t\t\tif len(tmpp) >1:\n\t\t\t\tprint tmpp[1]\n\t\t#tmp = line.split(' ')\n\t\t#i += 1\n\t\t#tmp = 'sdklsd sdjlks '\n\t\t#print len(tmp)\n\t\t#if len(tmp) > 1: \n\t\t\t#print tmp[1]\n\t\tline=f.readline()\n\t\n\tf.close()\n\tprint \"Method 4 done\"", "def get_csv_column(file_name, column):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n for x in infile.readlines():\n x = x.replace('\\n', '')\n # splitting based on ',' that are encountered in csv files.\n #column-1 because the range start from 0 , so if user enters 1st column then its 0th column we need to fetch\n list.append(x.split(',')[column - 1])\n return list", "def read_file(filename):\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n pass\n return contents,labels", "def djs_readcol(name,**kwargs):\n import re\n import numpy as np\n #\n # Number of lines\n #\n try:\n f = open(name,'r')\n except IOError:\n return None\n lines = f.readlines()\n f.close()\n nlines = len(lines)\n if 'silent' in kwargs:\n silent = True\n else:\n silent = False\n if 'debug' in kwargs:\n debug = True\n else:\n debug = False\n if debug:\n print(\"{0} contains {1} lines.\".format(name, nlines))\n if 'skip' in kwargs:\n skip = kwargs['skip']\n else:\n skip = 0\n nlines -= skip\n if 'numline' in kwargs:\n nlines = min(kwargs['numline'],nlines)\n #\n # Get the number of columns from the first non-skipped line\n #\n k = skip\n while lines[k][0] == '#':\n k += 1\n whitespace = re.compile(r'\\s+')\n baseline = lines[k].strip().replace(',',' ')\n basecols = whitespace.split(baseline)\n ncol = len(basecols)\n if 'format' in kwargs:\n if re.match(r'^\\(?[ABDFILX, ]+\\)?$',kwargs['format'],re.IGNORECASE) is None:\n print(\"Invalid format string!\")\n return None\n format = kwargs['format'].replace(' ','').upper().lstrip('(').rstrip(')').split(',')\n saveformat = [f for f in format if f != 'X']\n if len(format) < ncol:\n if not silent:\n print('Format string has fewer columns than the file.')\n ncol = len(format)\n else:\n #\n # Assume all floating point format\n #\n format = list('F'*ncol)\n saveformat = format\n if debug:\n print(','.join(format))\n nread = 0\n goodlist = list()\n for l in lines[skip:nlines]:\n nread += 1\n if debug:\n print(l)\n if len(l) < ncol or l[0] == '#':\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n #\n # Split the line\n #\n cols = whitespace.split(l.strip().replace(',',' '))\n savecols = [cols[k] for k in range(ncol) if format[k] != 'X']\n savelist = list()\n if len(savecols) == len(saveformat):\n for k in range(len(saveformat)):\n if saveformat[k] == 'A':\n 
#\n # Save strings as is.\n #\n saved = savecols[k]\n elif saveformat[k] == 'B' or saveformat[k] == 'I' or saveformat[k] == 'L':\n try:\n saved = int(savecols[k])\n except ValueError:\n #\n # Error, bad format, skip this line\n #\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n elif saveformat[k] == 'F' or saveformat[k] == 'D':\n try:\n saved = float(savecols[k])\n except ValueError:\n #\n # Error, bad format, skip this line\n #\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n else:\n print(\"Whoops, bad format! How did that happen?\")\n continue\n savelist.append(saved)\n if len(savelist) != len(saveformat):\n if not silent:\n print(\"Skipping line {0}\".format(skip+nread+1))\n else:\n #\n # Error, not enough columns\n #\n if not silent:\n print(\"Skipping line {0}\".format(skip+nread+1))\n continue\n goodlist.append(savelist)\n if len(goodlist) == 0:\n raise IOError('No valid lines found for specified format')\n if not silent:\n print(\"{0} valid lines read.\".format(len(goodlist)))\n #\n # Zip the good list\n #\n goodcols = zip(*goodlist)\n #\n # Convert the columns to pylab arrays\n #\n dtypes = { 'A':'S','B':'b','I':'i2','L':'i4','K':'i8','F':'f','D':'d' }\n converted = [np.array(goodcols[k],dtype=dtypes[saveformat[k]])\n for k in range(len(saveformat))]\n return tuple(converted)", "def read_csv(file, column_number):\n l = []\n\n # Read the file content\n with open(file, \"r\", encoding='utf-8', errors='ignore') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count != 0:\n l.append(row[column_number])\n line_count += 1\n\n line_count = 1\n length = len(l)\n\n # Writes the accessions in the required format\n accessions = ('[')\n for element in l:\n accessions += '[\"' + str(element) + '\"]'\n if line_count < length:\n accessions += ', '\n line_count += 1\n accessions += (']')\n\n return accessions", "def read_file(filepath):\n\tfile = open(filepath, 'r',encoding = \"utf-8\")\n\tdata = file.readlines()\n\tdata_list = []\n\tfor i in range(len(data)):\n\t\tif i != 0:\n\t\t\tdata_list.append(data[i])\n\tnum_list = []\n\tword_list = []\n\tfor l in data_list:\n\t\tif l != '\\n':\n\t\t\tentry = l.split('\\t')\n\t\t\tnum_list.append(int(entry[0]))\n\t\t\tword_list.append(entry[1][:-1])\n\treturn num_list,word_list", "def _read_csv_col(colNum: int, filename: str) -> List[str]:\n col = []\n with open(filename, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n col.append(str(row[colNum]))\n\n return col[1::] # Ignore the csv header", "def read_txt(self, widths=[3, 21, 4, 6, 4, 6, 12, 12]):\n cols = ['ID', 'SSSSSSSS.mmmuuun', 'AMP', 'THR', 'A-FRQ', 'R-FRQ', 'SIG STRNGTH', 'ABS-ENERGY']\n\n widths = widths\n self.data = pd.read_fwf(self.data_file, widths=widths, header=None, skiprows=self.skip_rows)\n self.data.columns = cols\n\n self.data = self.data.loc[self.data['ID'] == 1]\n self.skip_rows += len(self.data)", "def get_col_names(fname):\n with open(fname) as f:\n cols = f.readline().strip(\"#\\n\").lower()\n cols = (re.sub(r'\\(\\d+\\)', '', cols)\n .replace('/', '_to_')\n .split())\n return cols", "def textread(filepath):\n return np.array(pd.read_csv(filepath, \n sep = \"\\s+|\\t+|\\s+\\t+|\\t+\\s+\",\n header=None,\n comment='#',\n engine='python'))", "def read_2_col_file(file_name):\n myfile = open(file_name, \"r\")\n col1 = []; col2 = []\n lines = myfile.readlines()\n for line in lines:\n value = line.split()\n 
col1.append(float(value[0]))\n col2.append(1/(float(value[1])))\n myfile.close()\n return col1, col2", "def _load_column(filename, col=0):\n with open(filename) as f:\n col = list(zip(*csv.reader(f)))[0]\n return list(col)", "def read_metadata_txt(path):\n df = pd.read_csv(path,\n sep='\\s+', # Fields are separated by one or more spaces\n usecols=[0, 1, 2, 3, 4], # Grab only the first 4 columns\n # Missing elevation is noted as -999.9\n na_values=[-999.9],\n header=None,\n names=['station_id', 'latitude', 'longitude', 'elevation', 'state'])\n return df", "def _parse_textfile(self):\n\n field_names = list(self.FIELD_NAME_TO_INDEX.keys())\n field_indices = list(self.FIELD_NAME_TO_INDEX.values())\n frame = pd.read_csv(\n self.filepath,\n header=None, # MAGIC file has no header line\n delimiter=self.DELIMITER,\n usecols=field_indices,\n names=field_names,\n converters=self.FIELD_CONVERTERS,\n )\n return frame", "def loadtwocol(infile):\n kvdict = {}\n with open(infile) as f:\n for nline, line in enumerate(f):\n line = line.strip()\n if line: # to exclude blank line\n k, v = line.split('\\t')\n kvdict[k] = v\n return kvdict", "def read_features_from_file(filename):\n f = loadtxt(filename)\n return f[:, :4], f[:, 4:] # feature locations, descriptors", "def read_txt(filename):\n result = []\n\n with open(filename) as input_file:\n for row in csv.reader(input_file):\n row = list(map(int, row))\n result.append(row)\n\n return result", "def read_fit_column(file):\n\n # Data was pulled out of an exposure by modifying residual_fringe.py to write out a column of data\n # The function we are testing is fit_1d_background_complex.\n\n file_dir = Path(__file__).parent.resolve()\n file_path = str(file_dir / file)\n\n with fits.open(file_path) as hdu:\n col_data = hdu[1].data\n col_weight = hdu[2].data\n col_wnum = hdu[3].data\n bg_fit = hdu[4].data\n store_freq = hdu[0].header['FFREQ']\n\n return col_data, col_weight, col_wnum, bg_fit, store_freq", "def read_field(file_name):\n\n f = open(file_name, 'r', encoding='utf-8', errors='ignore')\n data = dict()\n row = 1\n for i in f:\n n = 1\n i = i.strip('\\n')\n for symb in i:\n data[(row, n)] = symb\n n += 1\n row += 1\n return data", "def SelectColumn(fileName, column, delim = \"\\t\"):\r\n \r\n #open the file and separate the specific column using a known delim\r\n with open (fileName, encoding = \"utf8\") as inf:\r\n reader = csv.reader (inf, delimiter = delim)\r\n ReqCol = list(zip(*reader))[column]\r\n \r\n return ReqCol", "def read_ppdb(fname):\n\tlst=[]\n\twith open(fname) as fp:\n\t\tfor line in fp:\n\t\t\ttmp1,tmp2,tmp3=line.split('|||')[:3]\n\t\t\ttmp1=tmp1.strip(' ')\n\t\t\ttmp2=tmp2.strip(' ')\n\t\t\ttmp3=tmp3.strip(' ')\n\t\t\tlst.append([tmp1,tmp2,tmp3])\n\t#lst.sort()t\n\treturn lst", "def read_tim(tim_file_name, column=1):\n elements = list()\n with open(tim_file_name,'r') as tim:\n for line in tim:\n line_elements = line.split(' ')\n if len(line_elements)>2 and line_elements[0]=='':\n elements.append(line_elements[column])\n return np.array(elements)", "def read_file(filename, columns):\n\n hostname_list = list(nasa_planets.pl_hostname)\n num_systems = len(hostname_list)\n for quantity, column in columns.items():\n if not hasattr(nasa_planets, quantity):\n setattr(nasa_planets,\n quantity,\n numpy.full((num_systems,), numpy.nan))\n with open(filename, 'r') as input_file:\n for line in input_file:\n entries = line.split()\n host = entries[columns['pl_hostname']]\n system_index = 0\n while (\n system_index < len(hostname_list)\n and\n (\n not 
hostname_list[system_index].startswith(host)\n or\n (\n len(hostname_list[system_index]) > len(host)\n and\n hostname_list[system_index][len(host)] != ' '\n )\n )\n ):\n system_index += 1\n if system_index == len(hostname_list):\n continue\n for quantity, column in columns.items():\n if quantity != 'pl_hostname':\n try:\n entry_val = int(entries[column])\n except ValueError:\n entry_val = float(entries[column])\n getattr(nasa_planets, quantity)[system_index] = (\n entry_val\n )", "def read_fits_columns(fname, what):\n \n res = {}\n what_list = what if isinstance(what, list) else [what]\n try:\n hdul = fits.open(fname, ignore_missing_end=True)\n t = hdul[1].data # For platefit the first extension contains the data\n\n \n for w in what_list:\n res[w] = t.field(w)\n\n hdul.close()\n except:\n print(\"Something went wrong in reading the columns\")\n raise\n\n\n return res", "def _load_column(filename, col=0):\n with open(filename) as f:\n col = sorted(list(zip(*csv.reader(f)))[0])\n return list(col)", "def read_column(path=None, into=list, linebreak=\"\\n\", lstrip=True, rstrip=True, compression=\"infer\", sheet_name=0, astype=str, exclude=(\"nan\")):\n if path is None:\n from pandas.io.clipboard import clipboard_get\n text = clipboard_get()\n else:\n if path.endswith((\".xls\", \"xlsx\")):\n text = linebreak.join(map(str, read_dataframe(path, sheet_name=sheet_name).index))\n else:\n with get_file_object(path, mode=\"read\", compression=compression, safe_mode=False, verbose=False) as f:\n text = f.read()\n \n elements = list()\n for element in text.split(linebreak):\n if lstrip:\n if isinstance(lstrip, str):\n element = element.lstrip(lstrip)\n else:\n element = element.lstrip()\n if rstrip:\n if isinstance(rstrip, str):\n element = element.rstrip(rstrip)\n else:\n element = element.rstrip()\n if bool(element):\n if element not in exclude:\n element = astype(element)\n elements.append(element)\n return into(elements)", "def dat_reader(fpath, fname):\n\n header = []\n data = []\n with open(fpath + fname + '.dat', 'rb') as file:\n for row in file:\n string_row = row.decode('iso-8859-1')\n if string_row[0] == 'C':\n header.append(string_row)\n else:\n data.append(string_row)\n\n return [header, data]", "def tsv_to_lists(path_file, col_ids, fail_on_short_line=True):\n ret_lists = tuple([] for _ in col_ids)\n max_col = max(col_ids)\n with open(path_file, \"r\") as fin:\n for line in fin:\n line = line.rstrip(\"\\n\")\n cols = line.split(\"\\t\")\n if max_col < len(cols):\n for col_id, col_list in zip(col_ids, ret_lists):\n col_list.append(cols[col_id])\n elif fail_on_short_line:\n raise ValueError(f'Illegal short line in {path_file}\\n{line}')\n return ret_lists", "def read_conll_pos_file(path):\n sents = []\n with open(path, \"r\") as f:\n curr = []\n for line in f:\n line = line.strip()\n if line == \"\":\n sents.append(curr)\n curr = []\n else:\n tokens = line.strip().split(\"\\t\")\n curr.append((tokens[1], tokens[3]))\n return sents", "def read_file(filename):\n reads = []\n labels = []\n\n with open(filename) as f:\n content = f.readlines()\n\n for line in content:\n _, read, label = re.sub('[null\\t\\n\\[\\]\\\"]', '', line).replace(' ', '').split(',')\n reads.append(read)\n labels.append(label)\n \n return reads, labels", "def file_read(file_name):\n \n #open specified file in read mode\n in_file = open(file_name, \"r\")\n \n #create data lists\n sp_length_v3 = []\n sp_period_v3 = [] \n\n #save header to string and split into list\n header_string = in_file.readline()\n header_v3 = 
header_string.split()\n \n #save revelent data to respective lists\n for line in in_file:\n values = line.split()\n sp_length_v3.append(float(values[1]))\n sp_period_v3.append(float(values[2]))\n \n #close the file\n in_file.close()\n \n #return 3D lists of lists containing data\n ans = [sp_length_v3, sp_period_v3, header_v3]\n \n return ans", "def read_features_from_file(filename):\n\tf = np.loadtxt(filename)\n\treturn f[:,:4],f[:,4:] # feature locations, descriptors", "def read(self, stream):\n root = []\n headings = []\n columns = []\n\n lines = [line.rstrip() for line in stream.read().splitlines()]\n\n if (not args.headings) or args.loose_headings:\n \"\"\"\n Most columns are probably left-justified but some (like numeric data) might be right-justified. We need to\n examine all the lines to see where each column begins and ends. We'll consider a column complete when we reach\n the end of a column where the same position is whitespace on all of the lines.\n \"\"\"\n\n c = 0\n start = 0\n while any([c < len(line) for line in lines]):\n if all([line[c:c+1].ljust(1) in string.whitespace for line in lines]) and \\\n any([line[start:c].strip() for line in lines]):\n \"\"\"\n Remember the beginning and end of this column\n \"\"\"\n columns.append((start, c))\n start = c\n c += 1\n\n \"\"\"\n Complete the trailing column\n \"\"\"\n if any([line[start:].strip() for line in lines]):\n columns.append((start, sys.maxsize))\n else:\n if lines:\n maxlen = max([len(line) for line in lines])\n delimiters = list(re.finditer('(\\s{2,})', lines[0]))\n if delimiters:\n if delimiters[0].start(1) > 0:\n log.debug('First delimiter: {}:{} {!r}'.format(delimiters[0].start(1), delimiters[0].end(1), delimiters[0].group(1)))\n columns.append((0, delimiters[0].end(1)))\n else:\n parser.error('Leading columns in heading row no allowed')\n for (pos, delimiter) in enumerate(delimiters):\n columns.append((delimiter.end(1), maxlen if pos + 1 == len(delimiters) else delimiters[pos + 1].end(1)))\n else:\n columns = [(0, maxlen)]\n else:\n parser.error('No heading row')\n\n log.debug('columns: {columns}'.format(**locals()))\n\n if args.headings and lines:\n headings = [lines[0][stops[0]:stops[1]].strip() for stops in columns]\n\n for line in lines[1 if args.headings else 0:]:\n if args.headings:\n root.append({headings[num]: line[start:stop].strip() for (num, (start, stop)) in enumerate(columns)})\n else:\n root.append([line[start:stop].strip() for (start, stop) in columns])\n\n return (root, headings)", "def read_data(columns, types = {}, filename= \"data/wxobs20170821.txt\"):\n #Initialize my data variable\n data = {}\n for column in columns:\n data[column] = []\n\n with open(filename, \"r\") as datafile:\n # read first three line (header)\n for _ in range(3):\n #print(_)\n datafile.readline()\n\n\n # Read and parse the rest of the file\n for line in datafile:\n split_line = line.split()\n for column in columns:\n i = columns[column]\n t = types.get(column, str)\n value = t(split_line[i])\n data[column].append(value)\n\n return data", "def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", 
\".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels", "def readFromFile(self,ffile,nbcolumns=None,columnsNames='yes',name='no',columnsUnits='no'):\n from exceptions import IOError\n try:\n if self.data.shape != (0,0):\n raise Exception(\"The table already contains values\")\n file = open(ffile, 'r')\n except :\n msg=\"can't open file <%s>...\\n\"%ffile\n raise IOError(msg)\n\n\n fileNameColumns=[]\n fileNameUnits=[]\n fileName=None\n \n filemaxnbcol=0\n fileminnbcol=100\n isonvalues=0\n allvaluesbycolonne=[]\n nbvalueline=0\n cpt=1\n for line in file.readlines():\n separe = line.split()\n if (len(separe) == 0 ):\n # blank line\n continue\n \n \n if ( separe[0] == '#' ):\n # comment line\n cpt=cpt+1\n continue\n elif ( separe[0] == '#TITLE:' ):\n # name line\n separe = separe[1:]\n s=''\n for isep in range(len(separe)):\n s=s+separe[isep]+' '\n fileName=s\n pass\n elif ( separe[0] == '#COLUMN_TITLES:' ):\n # column name line\n separe = separe[1:]\n s=''\n for isep in range(len(separe)):\n s=s+separe[isep]\n s=string.strip(s)\n if ( len(s) == 0 ):\n fileNameColumns=[]\n continue\n fileNameColumns = s.split('|')\n pass\n pass\n elif ( separe[0] == '#columnUnits:' ):\n # unit name line\n fileNameUnits = separe[1:]\n pass\n elif ( cpt == 1 ):\n # column name line\n pass\n else:\n # values line\n nbvalueline=nbvalueline+1\n linenbcol=len(separe)\n filemaxnbcol=max(linenbcol,filemaxnbcol)\n fileminnbcol=min(linenbcol,fileminnbcol)\n linevalues=[]\n \n for isep in range(linenbcol): linevalues.append(float(separe[isep]))\n \n # adjust nb columns if not the same on each line\n # or if the first value's line\n if ( filemaxnbcol < len(allvaluesbycolonne) ):\n for icol in range(filemaxnbcol,len(allvaluesbycolonne)):\n allvaluesbycolonne.append([])\n for il in range(nbvalueline-1):\n allvaluesbycolonne[il].append(0)\n pass\n pass\n elif ( filemaxnbcol > len(allvaluesbycolonne) ):\n for icol in range(len(allvaluesbycolonne),filemaxnbcol):\n allvaluesbycolonne.append([])\n for il in range(nbvalueline-1):\n allvaluesbycolonne[icol].append(0)\n pass\n pass\n pass\n # add values\n for icol in range(linenbcol): allvaluesbycolonne[icol].append(linevalues[icol])\n for icol in range(linenbcol,filemaxnbcol): allvaluesbycolonne[icol].append(0)\n \n cpt=cpt+1\n pass\n file.close()\n #\n # check consistency beetwen arguments and file contents\n #\n # controlling the table parameters\n # \n if ( fileminnbcol != filemaxnbcol ):\n raise IOError(\"colums must have the same number of rows\")\n \n if nbcolumns:\n if ( filemaxnbcol != nbcolumns ):\n raise IOError(\" problem with the number of columns\")\n pass\n \n # Warnings\n if ( ( columnsNames.lower() == 'no' ) and ( len(fileNameColumns) > 0 ) ):\n raise Warning(\" you should specify column names\")\n \n if ( ( columnsNames.lower() == 'yes' ) and ( len(fileNameColumns) == 0 ) ):\n raise Warning(\"you specified columnName(s) but the file doesn\\'t entail column names\")\n \n if ( len(fileNameColumns) < filemaxnbcol ):\n nbcol=len(fileNameColumns)\n for icol in range (nbcol,filemaxnbcol): fileNameColumns.append('col'+str(icol+1))\n \n effectivecolumnNames=fileNameColumns\n \n \n if ( ( name.lower() == 'no' ) and fileName ):\n msg='WARNING: you specified no name but there is name in file'\n print(msg)\n \n if ( ( name.lower() == 'yes' ) and ( fileName == None ) ):\n msg='WARNING: you specified name but there is no name in file'\n print(msg)\n \n if ( ( columnsUnits.lower() == 'no' ) and ( len(fileNameUnits) > 0 ) ):\n 
msg='WARNING: you specified no units name but there are units name in file'\n print(msg)\n \n if ( ( columnsUnits.lower() == 'yes' ) and ( len(fileNameUnits) == 0 ) ):\n msg='WARNING: you specified units name but there are no units name in file'\n print(msg)\n \n if ( ( len(fileNameUnits) > 0 ) and ( len(fileNameUnits) < filemaxnbcol ) ):\n nbcol=len(fileNameUnits)\n for icol in range (nbcol,filemaxnbcol): fileNameUnits.append('col'+str(icol+1))\n pass\n \n\n\n if fileName:\n self.setName(fileName)\n pass\n if len(fileNameUnits):\n self.setColumnUnits(fileNameUnits)\n\n for i in range(filemaxnbcol):\n if columnsNames.lower()=='yes':\n self.addColumn(effectivecolumnNames[i],allvaluesbycolonne[i])\n pass\n else:\n self.addColumnValues(allvaluesbycolonne[i])\n pass\n return", "def readFile (filename):\n # some OSes need to know that the file might have some special characters\n f = open(filename)\n # convert reader to a list so we can close the file\n result = [ line.strip().split('\\t') for line in f if len(line) > 1 ]\n # close the file so we do not take up extra system resources\n f.close()\n # throw away the header row(s) of the data\n return result[1:]", "def read_matrix(filename):\n with open(filename, 'r') as input_file:\n return [[int(column) for column in row.split()] for row in input_file]", "def csv_file_read(filename, a, b, c):\n dataframe = pd.read_csv(file_path + os.sep + filename, delimiter=None,\n header=None, names=None, index_col=None,\n usecols=[a, b, c], skiprows=1, skipfooter=0,\n nrows=None)\n x1 = dataframe.iloc[:, 0]\n x2 = dataframe.iloc[:, 1]\n x3 = dataframe.iloc[:, 2]\n return x1, x2, x3", "def read_file(file):\n text = []\n with open(file, newline='') as f:\n reader = csv.reader(f)\n next(reader, None) # skip header row\n for row in reader:\n text.append(row)\n return text", "def load_data_file(data_file):\n print(\"Loading from {} ...\".format(data_file.name), end=\"\")\n text_col = \"news_title\"\n theme1_col = \"Q3 Theme1\"\n\n with open(data_file) as f:\n df = pd.read_csv(f, sep=\"\\t\")\n X = df[text_col].tolist()\n y = None\n if theme1_col in df.columns:\n y = df[theme1_col].tolist()\n\n print(\n \"loaded {} lines {} labels ... 
done\".format(\n len(X), \"with\" if y is not None else \"without\"\n )\n )\n\n print(len(X))\n print(len(y))\n return (X, y)", "def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()", "def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()", "def read_matrix(filename):\n with open(filename, 'r') as input_file:\n return ([[int(column) for column in row.split()] for row in input_file])", "def parse_file(file):\n for line in open(file,'r'):\n line = line.strip()\n token = line.split('\\t')\n ### loop through ids in second column and print with first columns \n for item in token[1].split(','):\n print item+'\\t'+token[0]", "def UpdatePredefinedColumns(self):\n self.predefinedcolumns=[]\n for i in guihelper.getresourcefiles(\"*.pdc\"):\n f=common.opentextfile(i)\n self.predefinedcolumns.append(f.readline().strip())\n f.close()", "def load_data_file(data_file):\n print(\"Loading from {} ...\".format(data_file.name), end=\"\")\n text_col = \"news_title\"\n theme1_col = \"Q3 Theme1\"\n\n with open(data_file, encoding=\"utf8\") as f:\n df = pd.read_csv(f, sep=\"\\t\")\n X = df[text_col].tolist()\n\n y = None\n if theme1_col in df.columns:\n y = df[theme1_col].tolist()\n\n print(\n \"loaded {} lines {} labels ... done\".format(\n len(X), \"with\" if y is not None else \"without\"\n )\n )\n return (X, y)", "def loadtwocol_dlist(infile):\n kvdict = defaultdict(list)\n with open(infile) as f:\n for line in f:\n line = line.strip()\n if line:\n k, v = line.split('\\t')\n kvdict[v].append(k)\n return kvdict", "def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels", "def read_BED(path, last_col=False):\n if not last_col:\n Data = []\n with open(path) as f:\n for line in f:\n Data.append(line.strip().split()[:6])\n return Data\n\n elif last_col:\n Data = []\n Score = []\n with open(path) as f:\n for line in f:\n Data.append(line.strip().split()[:6])\n Score.append(float(line.strip().split()[-1]))\n return Data, Score\n else:\n print(\"ERROR\")", "def read_txt(path):\n mz = []\n i = []\n with open(path) as f:\n for line in f:\n line = line.split()\n mz.append(float(line[0]))\n i.append(float(line[1]))\n return mz, i", "def read_from_csv(file):\n with open(file) as f:\n next(f)\n data = []\n for line in csv.reader(f, delimiter='\\t'):\n data.append(list(line))\n return data", "def read_file(filename) -> List[Todo]:\n with pathlib.Path(filename).expanduser().open('r') as fp:\n return [Todo(_id, line) for _id, line in enumerate(fp)]", "def read_header(fname):\n \n with open(fname, 'r') as f:\n first_line = f.readline()\n cols_info = first_line.split(' ')\n col_names = []\n for col_info in cols_info:\n col_name = col_info.split('(')[0].strip('#')\n col_names.append(col_name)\n return col_names", "def get_formatted_data(line, indices=None):\n\tfile_data = str.strip(line).split(' ')\n\tif indices is 
None:\n\t\tdata = list(range(len(file_data)))\n\telse:\n\t\tdata = list(indices)\n\t\t\n\tfor i, file_column in enumerate(data):\n\t\tif file_column is not None:\n\t\t\tdatum = file_data[file_column]\n\t\telse:\n\t\t\tdatum = ' '\n\t\tif '.' in datum:\n\t\t\ttry:\n\t\t\t\tdatum = float(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tdatum = int(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\tdata[i] = datum\n\treturn data", "def read_text(filename):\n with open(filename, 'r') as f:\n com = f.readline()[0]\n wavelength, flux = np.loadtxt(filename, unpack=True,\n usecols=(0, 1), comments=com)\n return wavelength, flux", "def clauses_from_file(filename):\n with open(filename, \"r\") as fin:\n #remove comments from beginning\n line = fin.readline()\n while(line.lstrip()[0] == 'c'):\n line = fin.readline()\n\n header = line.split(\" \")\n num_literals = int(header[2].rstrip())\n\n lines = fin.readlines()\n\n for i in range(len(lines)):\n lines[i] = lines[i].split(\" \")[:-1]\n lines[i] = [int(x) for x in lines[i]]\n\n return (lines, num_literals)", "def readFile(filename):\n df = pd.read_csv(filename, header=0) # read the file\n return df.iloc[:,:].values", "def file_to_listrows(filename):\n rows = []\n with open(filename) as infile:\n content= infile.read()\n line = content.split('\\n')\n for cells in line:\n rows.append(cells.split('\\t'))\n return rows", "def data_extract(self, file):\n\n file_data = [row.strip().split() for row in open('data/{}'.format(file)).readlines()]\n return file_data", "def find_columns(input_file, title):\n contents = find_table_command(input_file)\n for command in contents:\n if ' '+title+' ' in command:\n command = command.split('\\n')\n command.pop(0)\n command.pop(-1)\n column = []\n for line in command:\n column.append(line.split()[0].strip('\\\"'))\n return column, command", "def read_file(filename):\n population_list = []\n line_counter = 0\n number_of_columns = 0\n for line in filename:\n line = line.split()\n if line_counter == 0:\n number_of_columns = len(line) #Seeing how long the original line is so if there are more than one words in the state name I can put them together when the time comes\n line_counter += 1\n population_list.append(line)\n else:\n length_check = len(line) - number_of_columns #Checking the length of the other lines \n # If the next line is the same length as the previous one nothing happens but if it's longer you put the first two together\n us_state = \" \".join(line[0:1 + length_check])\n state = [us_state] #creates a list with just the state\n#makes a list of all elements in the line other than the state so if the state has two names we can account for it\n population = [int(elements) for elements in line[length_check + 1:]] \n state.extend(population) # puts together the state and all other elements into one list \n population_list.append(state) #here is the list of all the other lists we made\n return population_list", "def read_setup(fname):\n with codecs.open(fname, 'r') as fin:\n n_row, n_col, L, H = list(map(int, fin.readline().split()))\n\n pizza = []\n for _ in range(n_row):\n line = fin.readline().strip()\n pizza.append(line)\n\n return pizza, n_row, n_col, L, H", "def readFromFile(self, inp):\n f = open(inp, \"r\")\n line = f.readline()\n line = line.strip().split(sep=\" \", maxsplit=3)\n self.columns, self.chars, self.pwdLength, _ = line\n self.columns = int(self.columns)\n self.pwdLength = int(self.pwdLength)\n self.func = lmdes\n line = f.readline()\n while line != '':\n pwd, hashV = 
line.strip().split(sep=\" \", maxsplit=1)\n self.table.insert(hashV, pwd)\n line = f.readline()\n f.close()", "def _read_feather_columns(path, columns, num_splits): # pragma: no cover\n from pyarrow import feather\n\n df = feather.read_feather(path, columns=columns)\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index)]", "def read_data(location, cols, delim):\n data = np.genfromtxt(location,\n delimiter=delim, skip_header=8, usecols=cols)\n \n return data", "def readListFromFile( filename ):\n try:\n with open( filename ) as f:\n lines = f.readlines()\n return lines\n except:\n #e = sys.exc_info()[0]\n print( 'Error reading file ' + filename )", "def read_data(file):\n data = pd.read_csv('facebook-links.txt.anon', delimiter=\"\\t\", header=None)\n data.columns = ['user', 'user_friend_list', 'time']\n return data", "def get_fields():\n fields = []\n with open(\"rules\", \"r\") as f:\n for line in f:\n field, ranges = line.strip().split(\": \")\n r1, r2 = ranges.split(\" or \")\n range1 = get_range(r1)\n range2 = get_range(r2)\n fields.append(Field(field, range1, range2))\n return fields", "def read_dataset(filetxt):\n text = open(filetxt, 'r')\n dataset = text.read()\n dataset = dataset.strip()\n text.close()\n return dataset", "def read_dataset(filetxt):\n text = open(filetxt, 'r')\n dataset = text.read()\n dataset = dataset.strip()\n text.close()\n return dataset", "def load_catme_data_sections(path_to_file):\n\n with open(path_to_file, 'r') as f:\n text = f.read()\n\n sections = text.split('\\n\\n')\n\n return sections", "def read_col(self, colname):\n self.open_msfile()\n data = self.tb.getcol(colname)\n self.close_msfile()\n return data", "def load_data(txt_path: str = RAW_TXT) -> pd.DataFrame:\n df = pd.read_csv(txt_path)[INDICES]\n return df", "def test_csvfile_get_columns(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=CONTENTS)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"index\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.ASCENDING,\n exact=True,\n ),\n \"temperature\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"site\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }", "def myloadtxt(fname, skiprows = 0):\n fin = file(fname)\n for i in range(skiprows):\n fin.readline()\n ln = fin.readline()\n lns = []\n while (ln != \"\"):\n thisln = []\n ln = ln.strip().split()\n for s in ln:\n try:\n f = float(s)\n except:\n f = None\n thisln.append(f)\n lns.append(thisln)\n ln = fin.readline()\n return np.array(lns)", "def read_file(path_file):\n with open(path_file, 'r') as f:\n L = f.readlines()\n if len(L[0]) == 9:\n #Y file\n matrix = np.zeros(len(L)-1)\n for index, l in enumerate(L):\n if index > 0:\n matrix[index-1] = 2*int(l.split(',')[1])-1\n elif len(L[0]) == 7:\n #X file\n matrix = np.chararray((len(L)-1,100))\n for index, l in enumerate(L):\n if index > 0:\n matrix[index-1,:] = list(l.split(',')[1][:-2])\n elif len(L[0]) > 100:\n #X_mat100 file\n matrix = np.zeros((len(L),100))\n for index, l in enumerate(L):\n matrix[index, :] = list(map(float, l.split(\" \")))\n else:\n assert('ERROR')\n return(matrix)", "def io_write_read_2dlist_txt():\n a_2d = [[\"ab\", \"cd\", \"ef\", \"gh\", \"ij\"]] * 3\n with open('2d.txt', 'w') as f:\n for a in a_2d:\n for item in a:\n f.write(f'{item}\\t')\n 
f.write('\\n')\n\n b_2d = []\n with open('2d.txt', 'r') as f:\n for line in f:\n b = line.strip().split('\\t')\n b_2d.append(b)\n print(b_2d)\n\n ## Output\n # [['ab', 'cd', 'ef', 'gh', 'ij'], ['ab', 'cd', 'ef', 'gh', 'ij'], ['ab', 'cd', 'ef', 'gh', 'ij']]\n\n ## Notes\n # strip([chars])\n # Returns a copy of the string with the leading and trailing characters removed.\n # If [chars] omitted or None, the chars argument defaults to removing whitespace.\n\n # split([str])\n # returns a list of all the words in the string, using [str] as the separator", "def load(file_name):\n with open(file_name, newline='') as f:\n reader = csv.reader(f)\n data = list(reader)\n\n schema = [x.strip() for x in data[0]]\n table = [[int(el) for el in row] for row in data[1:]]\n\n return schema, table", "def extract_columns_sample_file(sample_file, x_unit, y_unit=None):\n sample_table = np.loadtxt(sample_file, delimiter=\",\", comments=\"#\")\n x = sample_table[:, 0] * u.Unit(x_unit)\n y = sample_table[:, 1] if y_unit is None else sample_table[:, 1] * u.Unit(y_unit)\n return x, y" ]
[ "0.7060434", "0.6964561", "0.69441533", "0.67769", "0.6769207", "0.67402285", "0.6630263", "0.6448201", "0.6428578", "0.6414579", "0.628006", "0.6256943", "0.61892265", "0.6181183", "0.6084818", "0.6000709", "0.59918183", "0.5988072", "0.59837323", "0.5958952", "0.59168094", "0.59068", "0.58646405", "0.58628875", "0.5831001", "0.58295155", "0.5826093", "0.5812606", "0.5808599", "0.5789899", "0.57629484", "0.5757942", "0.5735284", "0.57347834", "0.5722018", "0.5707103", "0.5706851", "0.5692519", "0.5683978", "0.56642956", "0.5660587", "0.5654022", "0.5649147", "0.5647262", "0.5644278", "0.5643968", "0.56420517", "0.56401956", "0.56369925", "0.56231284", "0.560623", "0.56025547", "0.55975616", "0.5593866", "0.5593551", "0.5593198", "0.55853647", "0.5576749", "0.55583453", "0.5548335", "0.55469483", "0.554246", "0.5539826", "0.5539826", "0.5532224", "0.55101585", "0.55080247", "0.55050427", "0.5493444", "0.54927576", "0.54859364", "0.5483864", "0.5483555", "0.5480824", "0.5480023", "0.5476243", "0.5460377", "0.54593694", "0.54554754", "0.54542", "0.5439237", "0.54318887", "0.5431465", "0.5423927", "0.54154027", "0.54137385", "0.541334", "0.5413148", "0.5412974", "0.5404399", "0.53967226", "0.53967226", "0.5389254", "0.53890157", "0.5388927", "0.5388039", "0.5387306", "0.53699315", "0.53640014", "0.5363719", "0.5362402" ]
0.0
-1
Write to an ASCII file a list of lists containing floats.
def write_text_row(data, file_name): import types outf = open(file_name, "w") if (type(data[0]) == types.ListType): # It is a list of lists for i in xrange(len(data)): for j in xrange(len(data[i])): outf.write(" %12.5g"%data[i][j]) outf.write("\n") else: # Single list for j in xrange(len(data)): outf.write(" %12.5g"%data[j]) outf.write(" \n") outf.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeFloatListToFile(ldata, prec, filePath):\n\twith open(filePath, \"w\") as fh:\n\t\tfor d in ldata:\n\t\t\tfh.write(formatFloat(prec, d) + \"\\n\")", "def write_float32_list(self, float_list: List[float]) -> None:\n self.write_int32(len(float_list))\n for f in float_list:\n self.write_float32(f)", "def write(lst):\n # TODO", "def write_float_array(f, path, values, dtype='f8'):\n dset = f.create_dataset(path, (len(values),), dtype=dtype)\n dset[:] = values\n f.flush()", "def savealist(alist, filename):\n out = open(filename, \"w\")\n for i in alist:\n out.write(str(i) + \"\\n\") # if i is numeric\n out.close()", "def write_list(self):\n with open(self.path, 'w') as file:\n for i in map(self.addziros, range(1, int(str(1) + self.number_length * '0') + 1)):\n file.write(i + '\\n')\n file.close()", "def write_text_file(data, file_name):\n\timport types\n\toutf = open(file_name, \"w\")\n\tif (type(data[0]) == types.ListType):\n\t\t# It is a list of lists\n\t\tfor i in xrange(len(data[0])):\n\t\t\tfor j in xrange(len(data)):\n\t\t\t\tif type(data[j][i]) == type(0):\n\t\t\t\t\toutf.write(\" %12d\"%data[j][i])\n\t\t\t\telse:\n\t\t\t\t\toutf.write(\" %12.5g\"%data[j][i])\n\t\t\toutf.write(\"\\n\")\n\telse:\n\t\t# Single list\n\t\tfor j in xrange(len(data)):\n\t\t\tif type(data[j]) == type(0):\n\t\t\t\toutf.write(\" %12d\\n\"%data[j])\n\t\t\telse:\n\t\t\t\toutf.write(\" %12.5g\\n\"%data[j])\n\toutf.close()", "def save_lists_to_file(filename, elev_list, dist_list):\n import numpy as np\n\n np.save(file=filename,arr=np.array([elev_list, dist_list]))", "def write_coordinates(coordinate_lst, file):\n\n for c in coordinate_lst:\n file.write(\"\\t\\t\\t\" + str(c[0]) + \",\" + str(c[1]) + \",\" + str(c[2]) + '\\n')", "def insert_floats(self, numbers, location=None, overwrite=False):\n pass\n # def flatten(l): return [x for sublist in l for x in sublist]\n # def tobytes(x): return list(x.to_bytes(size, byteorder=self._byteorder))\n #\n # bytes_to_write = flatten([tobytes(x) for x in numbers])\n # return self.insert_bytes(bytes_to_write, location, overwrite)", "def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)", "def save_list(lines, filename):\n data = '\\n'.join(lines)\n file = open(filename, 'w')\n file.write(data)\n file.close()", "def SaveListFile(file,lst):\n\tlst = [str(i) +\"\\n\" for i in lst]\n\tif len(lst) == 0:\n\t\treturn\n\twith open(file,'w') as f:\n\t\tf.writelines(lst)\n\treturn lst", "def write_list_to_file(myList, filename):\r\n\r\n with open(filename, \"w\") as outfile:\r\n for entries in myList:\r\n outfile.write(entries)\r\n\t\t\t# add a return after each line\r\n outfile.write(\"\\n\")", "def save_list_to_file(the_list, filepath):\n with open(filepath, 'w') as file_handler:\n for item in the_list:\n file_handler.write(\"{}\\n\".format(item))", "def write_flt_file(filename, data, dsize):\n binfile = open(filename,'wb')\n\n dsize = numpy.array(dsize)\n dsize[-1] = data.shape[0]\n\n 
header = [len(dsize)] # dimension\n header.extend(list(dsize)) # size\n header.append(4) # data type: float\n header.append(dsize.prod()) # total length of data\n\n a = array.array('i')\n a.fromlist(header)\n if is_little_endian():\n a.byteswap()\n\n a.tofile(binfile)\n\n a = array.array('f')\n for o in data:\n a.fromlist(list(o))\n if is_little_endian():\n a.byteswap()\n a.tofile(binfile)\n binfile.close()", "def write_list(l, fname):\n thefile = open(fname, \"w\")\n for line in l:\n thefile.write(\"%s\\n\" % line)\n thefile.close()", "def io_write_read_1dlist_txt():\n a = [1, 2, 3, 4, 5]\n with open('1d.txt', 'w') as f:\n for item in a:\n f.write(f'{item}\\t')\n\n with open('1d.txt', 'r') as f:\n line = f.readline()\n b = line.strip().split('\\t')\n b = [int(item) for item in b]\n print(b)\n\n ## Output\n # [1, 2, 3, 4, 5]\n\n ## Notes\n # with statement\n # a convenient method for indicating a particular operation has some cleanup associated with it,\n # and to guarantee that cleanup happens, no matter what\n\n # list comprehension\n # List comprehensions provide a concise way to create lists.", "def create_data_file_from_list(lst, out_filename, dtype, shape):\n with open(out_filename, 'wb+') as out_file:\n out_file = open(out_filename, 'wb+')\n dat_file = np.memmap(out_file, dtype=dtype, shape=shape)\n dat_file[:] = lst[:]\n dat_file.flush()\n size = float(dat_file.nbytes) / (1024 ** 2)\n print('written %s : %.3f MB' % (out_filename, size))", "def save(self, filename):\n np.savetxt(\n filename,\n np.array([np.array(list(e)) for e in self]),\n fmt=\"%.03f\",\n delimiter=\"\\t\",\n )", "def write_list_to_file(file_name: str, list_name: List[str]):\n # Write to a file, overwriting the old contents\n file = open(file_name, 'w')\n\n # Loop through the list, append a newline character to each line\n for item in list_name:\n file.writelines(item + '\\n')\n\n # Close the file\n file.close()", "def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)", "def writeData(fname,data):\n with open(fname,\"w\",newline=\"\") as fo:\n wr = csv.writer(fo)\n wr.writerow([\"x\"]+[\"Series {}\".format(i+1) for i in range(len(data))])\n # just in case things are of different lengths\n n = max([len(d) for d in data])\n for i in range(n):\n lst = [i]\n for d in data:\n try:\n val = d[i]\n except IndexError:\n val = 0\n lst.append(val)\n wr.writerow(lst)", "def read_floats(filepointer):\n\tdata = read_strings(filepointer)\n\tif not data:\n\t\treturn None\n\ttry:\n\t\tdata = [float(x) for x in data]\n\t\treturn data\n\texcept:\n\t\t# try the next line\n\t\treturn read_floats(filepointer)", "def list_to_file(l, file_name):\r\n fw = open(file_name, 'w', encoding = 'utf-8')\r\n fw.write('\\n'.join(l))\r\n fw.close()", "def write_lines(list_of_lines, file):\r\n for i in range(0, len(list_of_lines)):\r\n file.write(list_of_lines[i] + b\"\\n\")", "def persist_list_to_csv(liste, nom_fichier):\n with open(nom_fichier, 'w') as f:\n for elem in liste :\n f.write(\"{}\\n\".format(elem))", "def dump(points, filename):\n with open(filename, 'w') as f:\n for i, pts in enumerate(points):\n for x, y in pts:\n f.write(f\"{x:.3f},{y:.3f},{i}\\n\")\n print(f\"Dumping data to {filename}...\")", "def write_string_list_to_file(string_list, filename):\n with open(filename, 'w') as f:\n for element in string_list:\n f.write(element+'\\n')", "def write_float(self, f: float) -> None:\n self.write(STRUCT_FLOAT.pack(f))", "def 
SaveList(list_variable, strfile, separator=','):\n\n robomath.Mat(list_variable).tr().SaveMat(strfile, separator)", "def write_into_file(name, liste):\n file = open(name, \"w\")\n for item in liste:\n file.write(item)\n file.write('\\n')\n file.close()", "def save_list(list_data, path, lineterminator='\\n', encoding=None, mode='w'):\n with open(path, mode) as f:\n list_data = [item + lineterminator for item in list_data]\n if encoding is not None:\n list_data = [item.encode(encoding) for item in list_data]\n\n f.writelines(list_data)", "def to_floats(lst):\n vals = []\n for arg in lst:\n vals.append( float(arg) )\n return vals", "def save_list_of_list(data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n if encoding is not None:\n data = [[item.encoding(encoding) for item in items]\n for items in data]\n writer.writerows(data)", "def writedata(filename, inputs, outputs):#=None, noutput=1):\n\t#if outputs == None:\n\t#\toutputs = np.zeros(shape=(np.shape(inputs)[0], noutput))\n\tassert np.shape(inputs)[0] == np.shape(outputs)[0]\n\tndp = np.shape(inputs)[0]\n\tnin = np.shape(inputs)[1]\n\tnout = np.shape(outputs)[1]\n\tf = open(filename, \"w\")\n\tf.write(\"%i %i %i\\n\" % (ndp, nin, nout))\n\tfor inputline, outputline in zip(inputs, outputs):\n\t\tf.write(\" \".join([\"%.6f\" % item for item in inputline]) + \"\\n\")\n\t\tf.write(\" \".join([\"%.6f\" % item for item in outputline]) + \"\\n\")\n\tf.close()\n\tlogger.debug(\"Wrote %s\\n(datapoints : %i, inputs : %i, outputs : %i)\" % (filename, ndp,\n\t\t\t\t\t\t\t\t\t\t nin, nout))", "def f2c_file_read_write_function():\n with open('Fdeg.dat', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n data = data[3:] # get lines with numerical values only\n\n F_list = [float(line[-1]) for line in data]\n C_list = [5/9.0*F - 32 for F in F_list]\n\n for i in range(len(C_list)):\n print(\"{:6g}F {:10.2f}C\".format(F_list[i], C_list[i]))\n\n return F_list", "def write_vec(f, vec, name, vec_type):\n f.write('%s %s[%d] = {\\n' % (vec_type, name, len(vec)))\n\n # Write vector elements\n for i in range(len(vec)):\n if vec_type == 'c_float':\n f.write('(c_float)%.20f,\\n' % vec[i])\n else:\n f.write('%i,\\n' % vec[i])\n\n f.write('};\\n')", "def writeCSV(list, filename):\n with open(filename, \"w\") as file:\n for row in list:\n for i in range(len(row)):\n file.write(str(row[i]))\n if i != len(row) - 1:\n file.write(\",\")\n else:\n file.write(\"\\n\")\n return", "def write_lis_lis(lis_lis,filename,cols=[]):\n lis_lis = [[str(l) for l in lis]\n for lis in lis_lis] # trans every element to str\n #make all inner lists of the same length\n inner_lis_max_len = max(len(lis) for lis in lis_lis)\n lis_lis = [lis + (inner_lis_max_len - len(lis)) * [''] for lis in lis_lis]\n #make element in the same list have the same length\n aligned = []\n for lis in lis_lis:\n width = max([len(l) for l in lis])\n lis = [l + (width - len(l)) * ' ' for l in lis]\n aligned.append(lis)\n new_lis_lis = [';'.join([aligned[i][j] for i in range(len(aligned))]) for j in range(len(aligned[0]))]\n with open(filename+'.txt','w') as w_f:\n if cols:\n print >> w_f,'\\t;'.join(cols)\n for l in new_lis_lis:\n print >> w_f,l", "def save_list_to_file(content: list, dst_path: str, append=False) -> None:\n with io.open(file=dst_path, mode=\"a\" if append else \"w\", encoding='utf-8') as destination_file:\n for element in content:\n destination_file.write(element + \"\\n\")", "def 
write_fortran(v, filename):\n n = np.array([4*len(v)], dtype='int32')\n v = np.array(v, dtype='float32')\n\n with open(filename, 'wb') as file:\n n.tofile(file)\n v.tofile(file)\n n.tofile(file)", "def write(fp, *data):\n from itertools import chain\n output = []\n header = []\n ntrials = None\n\n header.append(len(data)) # number of units\n\n ptr = 3 + len(data) # first data entry\n for unit in data:\n if ntrials is None:\n ntrials = len(unit)\n header.append(ntrials)\n elif ntrials != len(unit):\n raise ValueError(\"Each unit must have the same number of repeats\")\n header.append(ptr + len(output))\n output.extend(len(trial) for trial in unit)\n for trial in unit:\n output.extend(trial)\n\n for val in chain(header,output):\n fp.write(\"%r\\n\" % val)", "def save_double_list(list1, list2, filename):\r\n the_file = open(filename, \"wb\")\r\n try:\r\n writer = csv.writer(the_file)\r\n if len(list1)!=len(list2):\r\n raise Exception(\"Saving a double list : The list have not the same length !\")\r\n for i in range(len(list1)):\r\n writer.writerow( (list1[i], list2[i]) ) \r\n finally:\r\n the_file.close()", "def write_float(self, f):\n if not isinstance(f, float):\n raise TypeError(\"expected a float, got %r\" % (type(f),))\n\n self.write(self._packers[\"f\"].pack(f))", "def write(self, data, filename):\n id_ = 1\n weightlist_el = Element('weight-list')\n for dataset in data:\n weight_el = SubElement(weightlist_el, 'weight')\n id_el = SubElement(weight_el, 'id')\n id_el.text = str(id_)\n date_el = SubElement(weight_el, 'date')\n date_el.text = str(dataset.date) + 'T12:00:00'\n value_el = SubElement(weight_el, 'value')\n value_el.text = str(dataset.weight)\n comment_el = SubElement(weight_el, 'comment')\n comment_el.text = dataset.note\n id_ += 1\n st_tree = ElementTree(weightlist_el)\n st_tree.write(filename, encoding='UTF-8')", "def get_float_list(gene_file, c):\n\tfile = open(gene_file,'r')\n\tList = []\n\tfor line in file:\n\t\tif not re.match(\"#\", line):\n\t\t\tline = line.strip()\n\t\t\tsline = line.split()\n\t\t\tList.append(atof(sline[c]))\n\tfile.close()\n\treturn List", "def writeList2File(filename, array, overwrite=False, separator=';'):\n mode = 'a'\n if overwrite:\n mode = 'w'\n file = open(filename, mode)\n file.write(separator.join(map(str,array)) + '\\n')", "def write_double(self, f: float) -> None:\n self.write(STRUCT_DOUBLE.pack(f))", "def write_list(outputfilename, list):\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)\r\n except:\r\n input(\"File still open! 
Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)", "def writeStrListToFile(ldata, filePath, delem=\",\"):\n\twith open(filePath, \"w\") as fh:\n\t\tfor r in ldata:\n\t\t\tif type(r) == list:\n\t\t\t\tr = delem.join(r)\n\t\t\tfh.write(r + \"\\n\")", "def write_list(args, file_list):\n if not args.listfile.endswith(\".txt\"):\n args.listfile += \".txt\"\n outputfile = open(args.listfile, 'w')\n for name in file_list:\n outputfile.write(name)\n outputfile.write(\"\\n\")\n outputfile.close()", "def write_data_to_file(pos, fps, data_file):\n xs = []\n for x,y in pos:\n xs.append(x)\n with open(data_file,'wb') as f:\n np.save(f,pos)\n np.save(f,xs)\n np.save(f,fps)", "def save2file(lis, path):\r\n np.save(path, np.array(lis))", "def cut_data(data):\n out = [[], []]\n data = data.split(\"\\n\")\n for line in data:\n line = line.split(\" \")\n line = remove_empty(line)\n try:\n out[0].append(float(line[0]))\n out[1].append(float(line[1]))\n except IndexError:\n pass\n file = open(\"test.txt\", \"w\")\n for i in out[1]: # DELETE\n file.write(str(i))\n file.write(\"\\n\")\n file.close()\n return out", "def save_data_to_file(file_name, list_of_product_objects):\r\n objfile = open(file_name, 'w')\r\n for row in list_of_product_objects:\r\n objfile.write(row.product_name + \",\" + str(row.product_price) + \"\\n\")\r\n objfile.close()", "def write_list(self, register, data):\n raise NotImplementedError", "def csvwrite(inlist, stringify=False):\n out_list = []\n for entry in inlist:\n if stringify:\n new_entry = []\n for val in entry:\n if not isinstance(val, basestring):\n val = str(val)\n new_entry.append(val)\n entry = new_entry\n this_line = ', '.join([elem_quote(val) for val in entry])\n out_list.append(this_line)\n return out_list", "def writecc (listoflists,file,writetype='w',extra=2):\r\n if type(listoflists[0]) not in [ListType,TupleType]:\r\n listoflists = [listoflists]\r\n outfile = open(file,writetype)\r\n rowstokill = []\r\n list2print = copy.deepcopy(listoflists)\r\n for i in range(len(listoflists)):\r\n if listoflists[i] == ['\\n'] or listoflists[i]=='\\n' or listoflists[i]=='dashes':\r\n rowstokill = rowstokill + [i]\r\n rowstokill.reverse()\r\n for row in rowstokill:\r\n del list2print[row]\r\n maxsize = [0]*len(list2print[0])\r\n for col in range(len(list2print[0])):\r\n items = pstats.colex(list2print,col)\r\n items = map(pstats.makestr,items)\r\n maxsize[col] = max(map(len,items)) + extra\r\n for row in listoflists:\r\n if row == ['\\n'] or row == '\\n':\r\n outfile.write('\\n')\r\n elif row == ['dashes'] or row == 'dashes':\r\n dashes = [0]*len(maxsize)\r\n for j in range(len(maxsize)):\r\n dashes[j] = '-'*(maxsize[j]-2)\r\n outfile.write(pstats.lineincustcols(dashes,maxsize))\r\n else:\r\n outfile.write(pstats.lineincustcols(row,maxsize))\r\n outfile.write('\\n')\r\n outfile.close()\r\n return None", "def write_csv(fname, olist):\n ofile = open(fname, \"wb\")\n writer = csv.writer(ofile, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_ALL)\n writer.writerows(olist)", "def write_vector(vector, outfile):\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n vector = vector.copy()\n for k in vector:\n if isinstance(vector[k], np.ndarray):\n vector[k] = vector[k].round(4).tolist()\n with open(outfile, 'w') as f:\n json.dump(vector, f, separators=(',', ': '), 
indent=4)\n f.write('\\n')\n\n print(\" ... wrote {}\".format(outfile))", "def write_CSV_data(fname, names, npts, nvar, append, data):\n \n if append > 0:\n f = open(fname,'a')\n else:\n f = open(fname,'w')\n for nm in names:\n f.write(nm+',')\n f.write('\\n')\n for j in range(npts):\n for n in range(nvar):\n f.write('%10.4e, ' % data.value(j,n))\n f.write('\\n')\n f.close()", "def write_list_to_file(ls, save_path):\n # Open in appendation mode given that this function may be called multiple\n # times on the same file (positive and negative sentiment are in separate\n # directories).\n out_file = open(save_path, \"w+\")\n for example in ls:\n out_file.write(example)\n out_file.write('\\n')", "def write_vector(vector, outfile):\r\n out_dir = os.path.dirname(outfile)\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n vector = vector.copy()\r\n for k in vector:\r\n if isinstance(vector[k], np.ndarray):\r\n vector[k] = vector[k].round(4).tolist()\r\n with open(outfile, 'w') as f:\r\n json.dump(vector, f)\r\n f.write('\\n')\r\n\r\n print(\" ... wrote {}\".format(outfile))", "def print_to_file(list_of_lines, file_path):\r\n with open(file_path) as output_file:\r\n write_lines(list_of_lines, output_file)", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def list_to_file(sorted_list, filename):\n doc = Document()\n table = doc.add_table(rows=1, cols=2)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Word'\n hdr_cells[1].text = 'Occurrence'\n\n for key, value in sorted_list:\n row_cells = table.add_row().cells\n row_cells[0].text = key\n row_cells[1].text = str(value)\n\n doc.save(\"sorted - \" + filename)", "def make_list(filename):\n # with open(filename) as f:\n pois = []\n file = open(filename, 'r')\n for line in file:\n ls = line.split(\",\")\n entry = ((float(ls[1]), float(ls[2])), int(ls[3]))\n pois.append(entry)\n return pois", "def write_sequence(list):\n pass", "def floats(self) -> List[NumericType]:\n return [float(v) for v in self._record]", "def save_data_to_file(file_name, list_of_product_objects):\r\n try:\r\n objF = open(file_name, \"w\")\r\n for row in list_of_product_objects:\r\n objF.write(str(row[0]) + \",\" + str(row[1]) + \"\\n\")\r\n objF.close()\r\n except IOError:\r\n print(\"Unable to locate file\")", "def writeArray(fname,arr):\n fh = open(fname,'w')\n fh.write('%d\\n' % arr.shape[0])\n fh.write('%d\\n' % arr.shape[1])\n for x in range(arr.shape[0]):\n for y in range(arr.shape[1]):\n if arr.dtype == np.complex:\n fh.write('%.7e %.7e\\n' % (arr[x,y].real, arr[x,y].imag))\n else:\n fh.write('%.7e\\n' % (arr[x,y]))\n fh.close()", "def writeFloat(self, value: float):\n self._pack('!f', value)", "def all_measurements_to_list(ms, filename='n'):\n m_list = []\n for el in ms:\n # print(\"def all_measurements_to_list 010: \", el)\n for k, v in el.items():\n # print(\"def all_measurements_to_list 020: \", k, v)\n for meas in v:\n m_list.append(k + ',' + meas['name'])\n # print(\"def all_measurements_to_list 030: \", k + ' : ' + meas['name'])\n # print(m_list)\n # 
print(\"def all_measurements_to_list 040: length measurement list: \", len(m_list))\n #\n if filename != 'n':\n mlist = open(filename, 'w')\n for el in m_list:\n mlist.writelines(el + '\\n')\n mlist.close()\n # print(\"def all_measurements_to_list 050: list saved as: \", filename)\n else:\n # print(\"def all_measurements_to_list 051: list not saved as file.\")\n pass\n return m_list", "def write_nested_string_list_to_file(string_list, filename):\n with open(filename, 'w') as f:\n for i in range(0,len(string_list)):\n for element in string_list[i]:\n f.write(element+'\\t'+str(i)+'\\n')", "def save_gps_coordinates(points: list, file_name: str):\n\n with open(file_name, \"w\") as file:\n for point in points:\n if isinstance(point[0], list):\n str_point = str(point[0][0]) + \" \" + \\\n str(point[0][1]) + \" \" + str(point[1]) + \"\\n\"\n else:\n str_point = str(point[0]) + \" \" + str(point[1]) + \"\\n\"\n file.write(str_point)", "def VtFloat(list):\n return win32com.client.VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R8, list)", "def sed_write_prob_mat_list_to_csv(na_list, prob_mat_list, out_path):\n f = gzip.open(out_path, 'w')\n for n in range(len(na_list)):\n na = na_list[n]\n prob_mat = prob_mat_list[n]\n (n_time, n_lb) = prob_mat.shape\n for i2 in xrange(n_time):\n f.write(na)\n for i3 in xrange(n_lb):\n f.write(\"\\t%.4f\" % prob_mat[i2, i3])\n f.write(\"\\n\")\n f.close()", "def toFloatList(values):\n\treturn list(map(lambda va: float(va), values))", "def writeCSV(path,aList):\n\twith open(path,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(aList)\n\tw.close()", "def write_float32(self, f: float) -> None:\n self.buffer += struct.pack(\"<f\", f)", "def write(writer: BitStreamWriter, value: float) -> None:\n\n writer.writeFloat64(value)", "def csv_writelist(file, oldfile, chlst, num):\n import csv\n writelist = checkdifferences(oldfile, chlst, num)\n print('before', writelist)\n with open('{}.csv'.format(file), 'w', newline='') as csvwrite:\n writer = csv.writer(csvwrite, delimiter=';')\n try:\n for eachrow in writelist:\n writer.writerow(eachrow)\n except:\n if TypeError:\n print('Typeerror')\n csvwrite.close()", "def write_list_to_file(input_list, output_folder, delimiter=\" \", header=None):\n with open(output_folder, 'w') as doc_out:\n if header:\n doc_out.write(delimiter.join(header) + \"\\n\")\n for element in input_list:\n doc_out.write(delimiter.join([str(i) for i in element]) + \"\\n\")", "def export_ascii(filename, data, lats, lons):\n ascw = open(filename+\".asc\", \"w\")\n ascw.write(\"\"\"ncols %d\nnrows %d\nxllcenter %.2f\nyllcenter %.2f\ncellsize %.2f\nNODATA_value -9999\"\"\" % (\n len(lons), len(lats),\n lons[0], lats[0],\n lons[1] - lons[0]))\n for i in reversed(range(0, data.shape[0])):\n ascw.write(\"\\n\")\n for j in range(0, data.shape[1]):\n x, y = \"%.2f\" % lons[j], \"%.2f\" % lats[i]\n if j > 0:\n ascw.write(\" \")\n ascw.write(\"%.6f\" % data[i, j])\n ascw.close()", "def __type_of_elements_correct_floats_in_list(self):\n strTestName = 'Float elements in a list (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'type \\'tuple\\' parameter')\n RxCSObject.paramType('parameter1', (tuple))\n RxCSObject.parameter1 = (1, 4)\n\n RxCSObject.paramAddMan('parameter2', 'type \\'tuple or list\\' parameter')\n RxCSObject.paramType('parameter2', (tuple, list))\n RxCSObject.parameter2 = [4, 5, 9]\n\n RxCSObject.paramAddMan('parameter3', 'type \\'list\\' parameter')\n RxCSObject.paramType('parameter3', (list))\n 
RxCSObject.paramTypeEl('parameter3', (float))\n RxCSObject.parameter3 = [4.1, 5.3, 9.0]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def add_to_csv(file_name, single_list):\n final_list = read_csv(file_name)\n writer = csv.writer(open(file_name, 'wb'), delimiter=',',quoting=csv.QUOTE_MINIMAL)\n final_list.append(single_list)\n for x in final_list:\n writer.writerow(x)", "def write_to_file(output, data, datafields):\n if (len(data) != len(datafields)):\n print \"Error! number of data fields != number of headers!\"\n print 'len: ', len(data), len(datafields)\n print 'shape: ', np.shape(data), np.shape(datafields)\n\n ofile = open(output,'w')\n ofile.write(\"# g(r) in the xy-plane from 2Drdf.py\\n\")\n header = \"# chunk \"\n for element in datafields:\n header += element + \" \"\n\n header = header + '\\n'\n ofile.write(header)\n \n it = 0\n for i in xrange(len(data[0])):\n line = str(it) + \" \"\n it += 1\n for j in xrange(len(data)):\n line += str(float(data[j][i])) + \" \"\n line += \"\\n\"\n ofile.write(line)\n \n ofile.close()\n print \"Finished writing file: \", output", "def save_gps_coordinates_raw(points: list, file_name: str):\n\n with open(file_name, \"w\") as file:\n for point in points:\n file.write(str(point) + \"\\n\")", "def write(self, fname, headers=False, labels=True):\n strlist = self.string_lines(headers=headers, labels=labels)\n \n \n if not hasattr(fname, 'writelines'):\n with open(fname, 'w') as f:\n f.writelines(strlist)\n else:\n fname.writelines(strlist)\n return", "def write_list(self, data, delimiter=\"\\n\"):\n if self.check_list_exists(data):\n with opened_w_error(self.filename, self.lock, \"a\") as (f, err):\n if err:\n logging.error(\"File '%s'. Error: %s\", self.filename, err)\n else:\n f.write(delimiter.join(data))\n else:\n logging.error(\"Data isn't list or it's not contains elements\")", "def save_to_file(cls, list_objs):\n the_list = []\n if list_objs is not None:\n for stuff in list_objs:\n new_stuff = stuff.to_dictionary()\n the_list.append(new_stuff)\n the_list = Base.to_json_string(the_list)\n with open(\"{}.json\".format(cls.__name__), mode='w') as f:\n f.write(str(the_list))", "def save_csv(filename, save_list):\n with open(filename, mode='w') as csv:\n csv.writelines([','.join(item) + '\\n' for item in save_list])", "def fromlist(self, floats):\n self.header = {}\n params = [p for p in self]\n min_len = min(len(params), len(floats))\n for param, value in zip(params[:min_len], floats[:min_len]):\n param.value = value\n for param in params[min_len:]:\n param.value = param.default_value", "def convert_to_floats(str_lst):\n\n float_order = []\n for string in str_lst:\n float_order.append(float(string))\n\n return float_order", "def csv_save_list(list_data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n for item in list_data:\n if encoding is not None:\n writer.writerow([item.encode(encoding)])\n else:\n writer.writerow([item])", "def save_data(data,file):\n\n f = open(file, mode='w',encoding='utf-8', buffering=1024)\n for t in data:\n f.write(str(t[0]) + ', ' + str(t[1]) + '\\n')\n f.close()", "def write_features_to_file(filename,locs,desc):\n savetxt(filename, hstack((locs, desc)))", "def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", 
(filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)", "def writeQrels(qrelList, fileName):\n with open(fileName, 'w') as f:\n for e in qrelList:\n f.write(qrelEntry2Str(e))\n f.write('\\n')" ]
[ "0.7385591", "0.6873358", "0.6700241", "0.6473418", "0.6448024", "0.64338905", "0.6403743", "0.62480605", "0.6234932", "0.6223964", "0.61342627", "0.61157817", "0.60960627", "0.6086933", "0.607191", "0.59942085", "0.5985232", "0.5915316", "0.59132415", "0.5909576", "0.5845459", "0.5837347", "0.58252734", "0.58229136", "0.58122635", "0.5811334", "0.57941294", "0.57402307", "0.57273567", "0.57255054", "0.5706133", "0.5706057", "0.5703771", "0.56946385", "0.56777936", "0.5660642", "0.56490254", "0.5644169", "0.5634842", "0.5631408", "0.562202", "0.5607154", "0.5606605", "0.5596336", "0.5594632", "0.5590902", "0.5569519", "0.55647135", "0.5537477", "0.5532678", "0.55301106", "0.5522726", "0.5519943", "0.5494621", "0.54818", "0.5469196", "0.5458067", "0.54528016", "0.54467326", "0.54395884", "0.5418813", "0.5408296", "0.5398742", "0.539646", "0.53935647", "0.53888357", "0.5387532", "0.53793466", "0.5375823", "0.537572", "0.53735673", "0.5372521", "0.53646195", "0.5354595", "0.53543067", "0.53445125", "0.53424853", "0.5334307", "0.53262657", "0.53250074", "0.53187716", "0.5317542", "0.53171355", "0.53151757", "0.5311536", "0.5307998", "0.52992266", "0.5293517", "0.5291649", "0.52789843", "0.5275695", "0.5270918", "0.52700883", "0.52595705", "0.5256408", "0.5248025", "0.5246301", "0.52449095", "0.5244464", "0.5243551" ]
0.5709758
30
Read data from text file; if ncol = -1, read all columns; if ncol >= 0, just read the (ncol+1)th column.
def read_text_file(file_name, ncol = 0): from string import split inf = file(file_name, "r") line = inf.readline() data = [] while len(line) > 0: if ncol == -1: vdata = split(line) if data == []: for i in xrange(len(vdata)): data.append([float(vdata[i])]) else: for i in xrange(len(vdata)): data[i].append(float(vdata[i])) else: vdata = float(split(line)[ncol]) data.append(vdata) line = inf.readline() return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readOFColumnData(dataFile,nCol):\n fileCheck(dataFile) # does the file exists ? Stop if not.\n #\n # Init list\n data = []\n #\n for line in fileinput.input(dataFile):\n # remove parenthesis if any\n line = line.replace('(', '')\n line = line.replace(')', '') \n # divide each element of the line into words\n words = line.split()\n if words: # if there is a line in fact\n if words[0][0]!='#': #do something only if not comment \n data.append(float(words[nCol])) \n # \n return data", "def load_n_col(file):\n df = pd.read_csv(file, delimiter=\" \", header=None)\n columns = [list(df[col]) for col in df]\n return columns", "def readLines(filename, col=None):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n lines = [ s.rstrip(\"\\n\\r\") for s in lines ]\n if col == None:\n return lines\n else:\n return [ s.split(\"\\t\")[col] for s in lines ]", "def read_column(file_name, column_number):\n flist = []\n empty_lines = 0\n fread = open(file_name,'r')\n for line in fread:\n chompedLine = line.rstrip()\n if not chompedLine:\n empty_lines += 1\n continue\n flist.append(float(chompedLine.split()[column_number-1]))\n\n return flist", "def read_data_6_columns(filename=\"ripple_082-085.dat\", skip=1):\n fileobj = open(filename, 'r')\n # ignore the first skip lines\n for i in range(skip):\n fileobj.readline()\n h = []; k = []; qr =[]; qz =[]; q = []; F = []\n lines = fileobj.readlines()\n for line in lines:\n hval, kval, rval, zval, qval, Fval = line.split()\n h.append(int(hval)) \n k.append(int(kval))\n qr.append(float(rval))\n qz.append(float(zval))\n q.append(float(qval))\n F.append(float(Fval)) \n return h, k, qr, qz, q, F", "def readFile(fname):\n\n fromto = []\n cols = []\n with open(fname , 'r') as f:\n cols = f.readline().split(\",\")[0:4] # Headline\n for line in f:\n tm, frm, to, am = line.split(\",\")[0:4]\n frm = int(frm.lstrip())\n to = int(to.lstrip())\n fromto.append((frm,to))\n return cols, fromto", "def read_file_lines(filename, cols, skip=0, stop=-1, column_major=False, separator='[\\t ]'):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for line in [re.split(separator, l.strip()) for l in lines]]\n return np.transpose(res) if column_major else res", "def read_file(infile,column_num):\n\n \n column_list = []\n\n with open(infile,'r') as f:\n\n fl = f.readlines()\n\n for line in fl:\n \n \n value = int(line.split()[int(column_num)-1])\n column_list.append(value)\n\n\n return column_list", "def read_slurm_file(filename, cols, skip=54, stop=-34, column_major=True):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:stop]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for line in [l.split() for l in lines]]\n return np.transpose(res) if column_major else res", "def get_data(file,cols=0,nrows='all'):\n if type(cols)==type(0):\n cols=(cols,)\n nvar=1\n else: nvar=len(cols)\n data=get_str(file,cols,nrows)\n if nvar==1: return array(list(map(float,data)))\n else:\n data=list(data)\n for j in range(nvar): data[j]=array(list(map(float,data[j])))\n return 
tuple(data)", "def djs_readcol(name,**kwargs):\n import re\n import numpy as np\n #\n # Number of lines\n #\n try:\n f = open(name,'r')\n except IOError:\n return None\n lines = f.readlines()\n f.close()\n nlines = len(lines)\n if 'silent' in kwargs:\n silent = True\n else:\n silent = False\n if 'debug' in kwargs:\n debug = True\n else:\n debug = False\n if debug:\n print(\"{0} contains {1} lines.\".format(name, nlines))\n if 'skip' in kwargs:\n skip = kwargs['skip']\n else:\n skip = 0\n nlines -= skip\n if 'numline' in kwargs:\n nlines = min(kwargs['numline'],nlines)\n #\n # Get the number of columns from the first non-skipped line\n #\n k = skip\n while lines[k][0] == '#':\n k += 1\n whitespace = re.compile(r'\\s+')\n baseline = lines[k].strip().replace(',',' ')\n basecols = whitespace.split(baseline)\n ncol = len(basecols)\n if 'format' in kwargs:\n if re.match(r'^\\(?[ABDFILX, ]+\\)?$',kwargs['format'],re.IGNORECASE) is None:\n print(\"Invalid format string!\")\n return None\n format = kwargs['format'].replace(' ','').upper().lstrip('(').rstrip(')').split(',')\n saveformat = [f for f in format if f != 'X']\n if len(format) < ncol:\n if not silent:\n print('Format string has fewer columns than the file.')\n ncol = len(format)\n else:\n #\n # Assume all floating point format\n #\n format = list('F'*ncol)\n saveformat = format\n if debug:\n print(','.join(format))\n nread = 0\n goodlist = list()\n for l in lines[skip:nlines]:\n nread += 1\n if debug:\n print(l)\n if len(l) < ncol or l[0] == '#':\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n #\n # Split the line\n #\n cols = whitespace.split(l.strip().replace(',',' '))\n savecols = [cols[k] for k in range(ncol) if format[k] != 'X']\n savelist = list()\n if len(savecols) == len(saveformat):\n for k in range(len(saveformat)):\n if saveformat[k] == 'A':\n #\n # Save strings as is.\n #\n saved = savecols[k]\n elif saveformat[k] == 'B' or saveformat[k] == 'I' or saveformat[k] == 'L':\n try:\n saved = int(savecols[k])\n except ValueError:\n #\n # Error, bad format, skip this line\n #\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n elif saveformat[k] == 'F' or saveformat[k] == 'D':\n try:\n saved = float(savecols[k])\n except ValueError:\n #\n # Error, bad format, skip this line\n #\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n else:\n print(\"Whoops, bad format! 
How did that happen?\")\n continue\n savelist.append(saved)\n if len(savelist) != len(saveformat):\n if not silent:\n print(\"Skipping line {0}\".format(skip+nread+1))\n else:\n #\n # Error, not enough columns\n #\n if not silent:\n print(\"Skipping line {0}\".format(skip+nread+1))\n continue\n goodlist.append(savelist)\n if len(goodlist) == 0:\n raise IOError('No valid lines found for specified format')\n if not silent:\n print(\"{0} valid lines read.\".format(len(goodlist)))\n #\n # Zip the good list\n #\n goodcols = zip(*goodlist)\n #\n # Convert the columns to pylab arrays\n #\n dtypes = { 'A':'S','B':'b','I':'i2','L':'i4','K':'i8','F':'f','D':'d' }\n converted = [np.array(goodcols[k],dtype=dtypes[saveformat[k]])\n for k in range(len(saveformat))]\n return tuple(converted)", "def read_data_4_columns(filename=\"ripple_082-085.dat\"):\n # Process comment and header lines\n fileobj = open(filename, 'r')\n while True:\n s = fileobj.readline()\n if s.startswith('#'):\n print(s)\n continue\n elif s.startswith('h'):\n break\n else:\n print(\"Any comments (including an empty line) should start with #.\")\n print(\"Please fix your input file.\")\n sys.exit(1)\n \n # Go through data points \n h = []; k = []; q = []; F = []\n lines = fileobj.readlines()\n for line in lines:\n # This ignores an empty line\n line = line.rstrip()\n if not line: \n continue\n hval, kval, qval, Fval = line.split()\n h.append(int(hval))\n k.append(int(kval)) \n q.append(float(qval))\n F.append(float(Fval))\n return h, k, q, F", "def read_file(file_name, nrows=None):\n try:\n file_handle = open(file_name)\n except PermissionError as err:\n print('File IO error: ', err, file=STDE)\n else:\n return pandas.read_table(\n file_handle, nrows=nrows, low_memory=False,\n na_values=['NA', '.']\n )", "def read_file(self):\n colspecs = [[0, 7]] # for the id\n names = ['id']\n for question in self.question_list:\n colspecs.extend(question.get_column_range())\n names.extend(question.get_column_names())\n\n self.data = pd.read_fwf(self.file, colspecs=colspecs, encoding=self.encoding, names=names, header=None)\n self.data.fillna(0, inplace=True)\n self.data = self.data.astype(int)\n return self.data", "def read_txt(self, widths=[3, 21, 4, 6, 4, 6, 12, 12]):\n cols = ['ID', 'SSSSSSSS.mmmuuun', 'AMP', 'THR', 'A-FRQ', 'R-FRQ', 'SIG STRNGTH', 'ABS-ENERGY']\n\n widths = widths\n self.data = pd.read_fwf(self.data_file, widths=widths, header=None, skiprows=self.skip_rows)\n self.data.columns = cols\n\n self.data = self.data.loc[self.data['ID'] == 1]\n self.skip_rows += len(self.data)", "def read_data(file):\n\n f = open(file, mode='r')\n\n data = f.read().split('\\n')\n\n # Pop trailing end\n while data[-1] == \"\":\n data.pop()\n\n q = data[::2]\n a = data[1::2]\n f.close()\n\n return q, a", "def textread(filepath):\n return np.array(pd.read_csv(filepath, \n sep = \"\\s+|\\t+|\\s+\\t+|\\t+\\s+\",\n header=None,\n comment='#',\n engine='python'))", "def read_data_file(input_file):\n header_lines = 0\n last_pound_pos = -1\n with open(input_file, 'r') as data_file:\n while (data_file.read(1) == '#'):\n last_pound_pos = data_file.tell()\n header = data_file.readline()\n header_lines += 1\n\n #Read the next lines\n data_1 = data_file.readline().split()\n data_2 = data_file.readline().split()\n data_file.seek(last_pound_pos + 1) #Goes back to the last line of the header\n\n if header_lines == 0:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n\n else:\n # Single line file\n if len(data_2) == 0:\n 
data_file.readline()\n\n else:\n\n if len(data_1) != len(\n data_2): #If there is a line containing the number of particles,\n data_file.readline()\n data_file.readline()\n\n try:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n data.columns = header.split()\n except:\n raise Exception(\"The input file '%s' is corrupted, usually the problem is because \"\\\n \"there is an end of a line that has an additional space\" %input_file)\n\n return data", "def read_col(self, colname):\n self.open_msfile()\n data = self.tb.getcol(colname)\n self.close_msfile()\n return data", "def readMatrix(file):\n file1 = open(file, \"r\")\n rawData = file1.readlines()\n file1.close() \n \n n = round(len(rawData[0])/2) \n \n matrix2D = [[None for x in range(n)] for y in range(n)] \n \n j = 0\n for line in rawData: \n i = 0 \n for element in line:\n if element != \" \":\n if i == n:\n break\n matrix2D[j][i] = element\n i+= 1 \n j+= 1 \n \n return matrix2D", "def loadtxt_fast(filename, dtype=np.int, skiprows=0, delimiter=' '):\n def iter_func():\n with open(filename, 'r') as infile:\n for _ in range(skiprows):\n next(infile)\n skip = 0\n for line in infile:\n line = line.strip().split(delimiter)\n for item in line:\n yield dtype(item)\n loadtxt_fast.rowlength = len(line)\n data = np.fromiter(iter_func(), dtype=dtype)\n data = data.reshape((-1, loadtxt_fast.rowlength))\n return data", "def read_data(fname, cols):\n df = (pd.read_csv(fname, header=None, sep=r\"\\s+\", comment=\"#\",\n names=cols, dtype=np.float64)\n .iloc[1:]) # First line is the total number of trees\n # Could reset_index, but we don't shuffle the DataFrame\n return df", "def read_users(file_name):\n f = open(file_name, \"r\")\n header = f.readline()\n f.close()\n cols = [x.strip(\"\\\"\\n\") for x in header.split('\\t')]\n return cols[12:]", "def get_str(file,cols=0,nrows='all'):\n if type(cols)==type(0):\n cols=(cols,)\n nvar=1\n else: nvar=len(cols)\n lista=[]\n for i in range(nvar): lista.append([])\n buffer=open(file).readlines() \n if nrows=='all': nrows=len(buffer)\n counter=0\n for lines in buffer:\n if counter>=nrows : break\n if lines[0]=='#': continue\n pieces=lines.split()\n if len(pieces)==0: continue\n for j in range(nvar):lista[j].append(pieces[cols[j]])\n counter=counter+1\n if nvar==1: return lista[0]\n else: return tuple(lista)", "def readFile(filename):\n df = pd.read_csv(filename, header=0) # read the file\n return df.iloc[:,:].values", "def __readData(self, f, nRows, nCols):\n # Efficiently allocate all the memory we'll need.\n data = numpy.empty( (nCols, nRows), float )\n\n # Import data from the LFM Solar Wind file\n rowIndex = 0\n for row in f.readlines():\n if len(row.split()) != nCols: continue\n\n for col, field in enumerate(row.split()):\n data[col, rowIndex] = field\n\n rowIndex += 1\n\n # Bad things can happen if the file header says there is more\n # (or less) data than there actually is within the file!\n assert(rowIndex == nRows)\n\n return data", "def read_column(path=None, into=list, linebreak=\"\\n\", lstrip=True, rstrip=True, compression=\"infer\", sheet_name=0, astype=str, exclude=(\"nan\")):\n if path is None:\n from pandas.io.clipboard import clipboard_get\n text = clipboard_get()\n else:\n if path.endswith((\".xls\", \"xlsx\")):\n text = linebreak.join(map(str, read_dataframe(path, sheet_name=sheet_name).index))\n else:\n with get_file_object(path, mode=\"read\", compression=compression, safe_mode=False, verbose=False) as f:\n text = f.read()\n \n elements = list()\n for 
element in text.split(linebreak):\n if lstrip:\n if isinstance(lstrip, str):\n element = element.lstrip(lstrip)\n else:\n element = element.lstrip()\n if rstrip:\n if isinstance(rstrip, str):\n element = element.rstrip(rstrip)\n else:\n element = element.rstrip()\n if bool(element):\n if element not in exclude:\n element = astype(element)\n elements.append(element)\n return into(elements)", "def method4(fname):\n\t#jfrom cStringIO import StringIO\n\t#from tokenize import generate_tokens\n\timport re\n\tprint \"Method 4: read in files by line\"\n\tprint \"and rather than printing out all of it, only print out specific cols \"\n\tf = open(fname,\"r\")\n\tline = f.readline()\n\ti = 0 \n\t\n\twhile line != '':\n\t\ttmp= line.strip()\n\t\tif tmp :\n\t\t\t#print tmp\n\t\t\t#tmp = line.strip()\n\t\t\ttmpp = tmp.split()\n\t\t\t#i +=1\n\t\t\t#print len(tmpp)\n\t\t\tif len(tmpp) >1:\n\t\t\t\tprint tmpp[1]\n\t\t#tmp = line.split(' ')\n\t\t#i += 1\n\t\t#tmp = 'sdklsd sdjlks '\n\t\t#print len(tmp)\n\t\t#if len(tmp) > 1: \n\t\t\t#print tmp[1]\n\t\tline=f.readline()\n\t\n\tf.close()\n\tprint \"Method 4 done\"", "def read_data(filename):\n f = open(filename, \"r\")\n line = f.readline()\n t, n, m, s, population = line.split()\n line = f.readline()\n board = []\n paths = []\n i = 0\n while line:\n if i < int(n):\n board.append([int(x) for x in line if x != '\\n'])\n else:\n paths.append(line if '\\n' not in line else line[:len(line) - 2])\n line = f.readline()\n i += 1\n return int(t), int(n), int(m), int(s), int(population), paths, np.array(board)", "def load_input(self, number_of_rows_to_read):\n self.dataframe = pandas.read_csv(self.filename, nrows=number_of_rows_to_read)\n #self._describe_input_data()", "def read_data(location, cols, delim):\n data = np.genfromtxt(location,\n delimiter=delim, skip_header=8, usecols=cols)\n \n return data", "def read_BED(path, last_col=False):\n if not last_col:\n Data = []\n with open(path) as f:\n for line in f:\n Data.append(line.strip().split()[:6])\n return Data\n\n elif last_col:\n Data = []\n Score = []\n with open(path) as f:\n for line in f:\n Data.append(line.strip().split()[:6])\n Score.append(float(line.strip().split()[-1]))\n return Data, Score\n else:\n print(\"ERROR\")", "def read_text(filename):\n with open(filename, 'r') as f:\n com = f.readline()[0]\n wavelength, flux = np.loadtxt(filename, unpack=True,\n usecols=(0, 1), comments=com)\n return wavelength, flux", "def loadtxt(filepath,comments='#',delimiter=None,skiprows=0,usecols=None,index_offset=1):\n d = np.loadtxt(filepath,comments=comments,delimiter=delimiter,skiprows=skiprows,usecols=usecols)\n if d.shape[1] < 3:\n raise ValueError('invalid number of columns in input')\n row = d[:,0]-index_offset\n col = d[:,1]-index_offset\n data = d[:,2]\n shape = (max(row)+1,max(col)+1)\n return csr_matrix((data,(row,col)),shape=shape)", "def read_data(path, filename, drop_col=\"index\", dt=\"float32\"):\n\tdata = pd.read_csv(path + filename, sep=\",\", dtype=dt)\n\tdata = data.drop(drop_col, axis=1)\n\treturn data.as_matrix()", "def get_data(data_file_path):\n data_file = open(data_file_path, 'r').readlines()\n data = []\n n = -1\n dim = -1\n for i in range(len(data_file)):\n line_elems = [float(x) for x in data_file[i].split()]\n if i == 0:\n n = int(line_elems[0])\n dim = int(line_elems[1])\n else:\n data.append(np.array(line_elems))\n return data, n, dim", "def readFile (filename):\n # some OSes need to know that the file might have some special characters\n f = open(filename)\n # convert reader to a list 
so we can close the file\n result = [ line.strip().split('\\t') for line in f if len(line) > 1 ]\n # close the file so we do not take up extra system resources\n f.close()\n # throw away the header row(s) of the data\n return result[1:]", "def read_file(self, fn_input):\n if not os.path.exists(fn_input):\n raise Exception(\"ERROR: Input file does not exist: %s\" % fn_input)\n with open(fn_input, 'rt', newline='') as infile:\n reader = csv.reader(infile)\n self.data = []\n for row in reader:\n self.data.append([])\n for value in row:\n if value == 'nan':\n self.data[-1].append(None)\n else:\n try:\n self.data[-1].append(float(value))\n except:\n raise Exception(\"ERROR: unexpected text in input file: '%s'\" % str(value))", "def readData(self):\n self._setupArrays()\n\n with open(self.filename) as fh:\n datalines = fh.readlines()[self.NLHEAD:]\n\n datalines = self._checkForBlankLines(datalines)\n\n # Set up loop over unbounded indpendent variable\n m = 0 # Unbounded independent variable mark \n while len(datalines) > 0:\n datalines = self._readData1(datalines, m)\n datalines = self._readData2(datalines, m)\n m = m + 1", "def read_2_col_file(file_name):\n myfile = open(file_name, \"r\")\n col1 = []; col2 = []\n lines = myfile.readlines()\n for line in lines:\n value = line.split()\n col1.append(float(value[0]))\n col2.append(1/(float(value[1])))\n myfile.close()\n return col1, col2", "def read(self, filename):\n lines = []\n rawData = []\n file = open(filename, \"rU\")\n csv_reader = csv.reader( file )\n for line in csv_reader:\n lines.append(line)\n for item in range(len(line)):\n line[item] = line[item].replace(\" \",\"\")\n self.headers = lines[0]\n self.types = lines[1]\n rawData = lines[2:]\n for row in rawData:\n newRow = []\n for i in range(len(row)):\n if self.types[i] != 'numeric':\n continue\n else:\n newRow.append(float((row[i].strip())))\n self.finalData.append(newRow)\n self.data = np.matrix(self.finalData)\n\n for i in range(len(self.types)):\n if self.types[i] == 'numeric':\n self.numHeadList.append(self.headers[i])\n i = 0\n for header in self.numHeadList:\n self.header2col[header] = i\n i += 1\n\n return self.data", "def read_dataset(filetxt):\n text = open(filetxt, 'r')\n dataset = text.read()\n dataset = dataset.strip()\n text.close()\n return dataset", "def read_dataset(filetxt):\n text = open(filetxt, 'r')\n dataset = text.read()\n dataset = dataset.strip()\n text.close()\n return dataset", "def readfile(filepath, mode, separator='\\t', datatype=float, skiprows=1):\n if mode == 'df':\n data = pd.read_table(filepath, separator, engine='python')\n if mode == 'npa':\n data = np.loadtxt(filepath, dtype=datatype, skiprows=skiprows)\n return data", "def csv_file_read(filename, a, b, c):\n dataframe = pd.read_csv(file_path + os.sep + filename, delimiter=None,\n header=None, names=None, index_col=None,\n usecols=[a, b, c], skiprows=1, skipfooter=0,\n nrows=None)\n x1 = dataframe.iloc[:, 0]\n x2 = dataframe.iloc[:, 1]\n x3 = dataframe.iloc[:, 2]\n return x1, x2, x3", "def load_data(self, dropna=False):\r\n # Load data, delete Ml index, get number of channels, add\r\n df = pd.read_csv(self.file_path, header=None, index_col=0, dtype='float64')\r\n\r\n cols = df.shape[1]\r\n if cols < 2:\r\n raise ValueError(f'{self} wrong file type.')\r\n\r\n df.columns = ['t'] + [f\"c{i}\" for i in range(1, cols)]\r\n df.index = df.index.astype(int)\r\n df.index.name = 'r'\r\n\r\n if dropna:\r\n df.dropna(axis=1, how='all', inplace=True)\r\n\r\n self.set_data(df)", "def read_file(path_file):\n with 
open(path_file, 'r') as f:\n L = f.readlines()\n if len(L[0]) == 9:\n #Y file\n matrix = np.zeros(len(L)-1)\n for index, l in enumerate(L):\n if index > 0:\n matrix[index-1] = 2*int(l.split(',')[1])-1\n elif len(L[0]) == 7:\n #X file\n matrix = np.chararray((len(L)-1,100))\n for index, l in enumerate(L):\n if index > 0:\n matrix[index-1,:] = list(l.split(',')[1][:-2])\n elif len(L[0]) > 100:\n #X_mat100 file\n matrix = np.zeros((len(L),100))\n for index, l in enumerate(L):\n matrix[index, :] = list(map(float, l.split(\" \")))\n else:\n assert('ERROR')\n return(matrix)", "def load_data(txt_path: str = RAW_TXT) -> pd.DataFrame:\n df = pd.read_csv(txt_path)[INDICES]\n return df", "def read_from_slow(self, filename):\n self.x, self.y, self.field = np.loadtxt(filename, unpack=True, usecols=(0, 1, 2))\n # Not sure the function easily deals with data files with different number of columns.\n # Maybe not necessary to deal with that situation.\n return self", "async def read(self, n: int = -1) -> AnyStr:\n\n # load file\n if len(self._buffer) == 0 and \"r\" in self.mode:\n await self._download()\n\n # check size\n if n == -1:\n data = self._buffer\n self._pos = len(self._buffer) - 1\n else:\n # extract data to read\n data = self._buffer[self._pos : self._pos + n]\n self._pos += n\n\n # return data\n return data", "def read_data(filename):\n \n ######################################################\n # Disadvantage here: only includes J_up = 11 here, #\n # please manually add more if you have #\n # J_up >= 12 CO lines #\n ######################################################\n \n ascii_data = ascii.read(\n filename, names=[\n \"SOURCE\", \"z\", \"D_L\", \"line_width\",\n \"CO_J_1\", \"eCO_J_1\", \"CO_J_2\", \"eCO_J_2\", \"CO_J_3\", \"eCO_J_3\",\n \"CO_J_4\", \"eCO_J_4\", \"CO_J_5\", \"eCO_J_5\", \"CO_J_6\", \"eCO_J_6\",\n \"CO_J_7\", \"eCO_J_7\", \"CO_J_8\", \"eCO_J_8\", \"CO_J_9\", \"eCO_J_9\",\n \"CO_J_10\", \"eCO_J_10\", \"CO_J_11\", \"eCO_J_11\", \"CI_1\", \"eCI_1\",\n \"CI_2\", \"eCI_2\"])\n\n pd = ascii_data.to_pandas()\n pd = pd.set_index('SOURCE')\n return pd.T", "def read_file(path_to_file):\n 8", "def read_general_file(\n file, delim=\",\", header_rows=\"infer\", skip_rows=None, skip_blank_lines=True, encoding=None\n):\n\n df = pd.read_csv(\n file,\n sep=delim,\n header=header_rows,\n skiprows=skip_rows,\n skip_blank_lines=skip_blank_lines,\n encoding=encoding,\n )\n\n return df", "def read_fit_column(file):\n\n # Data was pulled out of an exposure by modifying residual_fringe.py to write out a column of data\n # The function we are testing is fit_1d_background_complex.\n\n file_dir = Path(__file__).parent.resolve()\n file_path = str(file_dir / file)\n\n with fits.open(file_path) as hdu:\n col_data = hdu[1].data\n col_weight = hdu[2].data\n col_wnum = hdu[3].data\n bg_fit = hdu[4].data\n store_freq = hdu[0].header['FFREQ']\n\n return col_data, col_weight, col_wnum, bg_fit, store_freq", "def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. 
Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])", "def _read_data(self, txtfile):\n data_string = open(txtfile,'r').read()\n return data_string", "def datread(file=None, header=0):\n with open(file, 'r') as fr:\n op = np.array([list(map(float, l.split())) for l in fr.readlines()[header:]])\n return op", "def data_reader(path):\n file = open(path, \"r\")\n data = file.readlines()[4:]\n file.close()\n return data", "def read(self, stream):\n root = []\n headings = []\n columns = []\n\n lines = [line.rstrip() for line in stream.read().splitlines()]\n\n if (not args.headings) or args.loose_headings:\n \"\"\"\n Most columns are probably left-justified but some (like numeric data) might be right-justified. We need to\n examine all the lines to see where each column begins and ends. We'll consider a column complete when we reach\n the end of a column where the same position is whitespace on all of the lines.\n \"\"\"\n\n c = 0\n start = 0\n while any([c < len(line) for line in lines]):\n if all([line[c:c+1].ljust(1) in string.whitespace for line in lines]) and \\\n any([line[start:c].strip() for line in lines]):\n \"\"\"\n Remember the beginning and end of this column\n \"\"\"\n columns.append((start, c))\n start = c\n c += 1\n\n \"\"\"\n Complete the trailing column\n \"\"\"\n if any([line[start:].strip() for line in lines]):\n columns.append((start, sys.maxsize))\n else:\n if lines:\n maxlen = max([len(line) for line in lines])\n delimiters = list(re.finditer('(\\s{2,})', lines[0]))\n if delimiters:\n if delimiters[0].start(1) > 0:\n log.debug('First delimiter: {}:{} {!r}'.format(delimiters[0].start(1), delimiters[0].end(1), delimiters[0].group(1)))\n columns.append((0, delimiters[0].end(1)))\n else:\n parser.error('Leading columns in heading row no allowed')\n for (pos, delimiter) in enumerate(delimiters):\n columns.append((delimiter.end(1), maxlen if pos + 1 == len(delimiters) else delimiters[pos + 1].end(1)))\n else:\n columns = [(0, maxlen)]\n else:\n parser.error('No heading row')\n\n log.debug('columns: {columns}'.format(**locals()))\n\n if args.headings and lines:\n headings = [lines[0][stops[0]:stops[1]].strip() for stops in columns]\n\n for line in lines[1 if args.headings else 0:]:\n if args.headings:\n root.append({headings[num]: line[start:stop].strip() for (num, (start, stop)) in enumerate(columns)})\n else:\n root.append([line[start:stop].strip() for (start, stop) in columns])\n\n return (root, headings)", "def read_csv_file(filename, index_st):\n\tfile = open(filename)\n\treader = csv.reader(file)\n\tdata_all = list(reader)\t\n\tdata = np.array(data_all[index_st:])\n\treturn data", "def read_data(columns, types = {}, filename= \"data/wxobs20170821.txt\"):\n #Initialize my data variable\n data = {}\n for column in columns:\n data[column] = []\n\n with open(filename, \"r\") as datafile:\n # read first three line (header)\n for _ in range(3):\n #print(_)\n datafile.readline()\n\n\n # Read and parse the rest of the file\n for line in datafile:\n split_line = line.split()\n for column in columns:\n i = columns[column]\n t = types.get(column, str)\n value = t(split_line[i])\n data[column].append(value)\n\n return data", "def read(self, n):\n return self.file.read(n)", "def 
readFromFile(self,ffile,nbcolumns=None,columnsNames='yes',name='no',columnsUnits='no'):\n from exceptions import IOError\n try:\n if self.data.shape != (0,0):\n raise Exception(\"The table already contains values\")\n file = open(ffile, 'r')\n except :\n msg=\"can't open file <%s>...\\n\"%ffile\n raise IOError(msg)\n\n\n fileNameColumns=[]\n fileNameUnits=[]\n fileName=None\n \n filemaxnbcol=0\n fileminnbcol=100\n isonvalues=0\n allvaluesbycolonne=[]\n nbvalueline=0\n cpt=1\n for line in file.readlines():\n separe = line.split()\n if (len(separe) == 0 ):\n # blank line\n continue\n \n \n if ( separe[0] == '#' ):\n # comment line\n cpt=cpt+1\n continue\n elif ( separe[0] == '#TITLE:' ):\n # name line\n separe = separe[1:]\n s=''\n for isep in range(len(separe)):\n s=s+separe[isep]+' '\n fileName=s\n pass\n elif ( separe[0] == '#COLUMN_TITLES:' ):\n # column name line\n separe = separe[1:]\n s=''\n for isep in range(len(separe)):\n s=s+separe[isep]\n s=string.strip(s)\n if ( len(s) == 0 ):\n fileNameColumns=[]\n continue\n fileNameColumns = s.split('|')\n pass\n pass\n elif ( separe[0] == '#columnUnits:' ):\n # unit name line\n fileNameUnits = separe[1:]\n pass\n elif ( cpt == 1 ):\n # column name line\n pass\n else:\n # values line\n nbvalueline=nbvalueline+1\n linenbcol=len(separe)\n filemaxnbcol=max(linenbcol,filemaxnbcol)\n fileminnbcol=min(linenbcol,fileminnbcol)\n linevalues=[]\n \n for isep in range(linenbcol): linevalues.append(float(separe[isep]))\n \n # adjust nb columns if not the same on each line\n # or if the first value's line\n if ( filemaxnbcol < len(allvaluesbycolonne) ):\n for icol in range(filemaxnbcol,len(allvaluesbycolonne)):\n allvaluesbycolonne.append([])\n for il in range(nbvalueline-1):\n allvaluesbycolonne[il].append(0)\n pass\n pass\n elif ( filemaxnbcol > len(allvaluesbycolonne) ):\n for icol in range(len(allvaluesbycolonne),filemaxnbcol):\n allvaluesbycolonne.append([])\n for il in range(nbvalueline-1):\n allvaluesbycolonne[icol].append(0)\n pass\n pass\n pass\n # add values\n for icol in range(linenbcol): allvaluesbycolonne[icol].append(linevalues[icol])\n for icol in range(linenbcol,filemaxnbcol): allvaluesbycolonne[icol].append(0)\n \n cpt=cpt+1\n pass\n file.close()\n #\n # check consistency beetwen arguments and file contents\n #\n # controlling the table parameters\n # \n if ( fileminnbcol != filemaxnbcol ):\n raise IOError(\"colums must have the same number of rows\")\n \n if nbcolumns:\n if ( filemaxnbcol != nbcolumns ):\n raise IOError(\" problem with the number of columns\")\n pass\n \n # Warnings\n if ( ( columnsNames.lower() == 'no' ) and ( len(fileNameColumns) > 0 ) ):\n raise Warning(\" you should specify column names\")\n \n if ( ( columnsNames.lower() == 'yes' ) and ( len(fileNameColumns) == 0 ) ):\n raise Warning(\"you specified columnName(s) but the file doesn\\'t entail column names\")\n \n if ( len(fileNameColumns) < filemaxnbcol ):\n nbcol=len(fileNameColumns)\n for icol in range (nbcol,filemaxnbcol): fileNameColumns.append('col'+str(icol+1))\n \n effectivecolumnNames=fileNameColumns\n \n \n if ( ( name.lower() == 'no' ) and fileName ):\n msg='WARNING: you specified no name but there is name in file'\n print(msg)\n \n if ( ( name.lower() == 'yes' ) and ( fileName == None ) ):\n msg='WARNING: you specified name but there is no name in file'\n print(msg)\n \n if ( ( columnsUnits.lower() == 'no' ) and ( len(fileNameUnits) > 0 ) ):\n msg='WARNING: you specified no units name but there are units name in file'\n print(msg)\n \n if ( ( 
columnsUnits.lower() == 'yes' ) and ( len(fileNameUnits) == 0 ) ):\n msg='WARNING: you specified units name but there are no units name in file'\n print(msg)\n \n if ( ( len(fileNameUnits) > 0 ) and ( len(fileNameUnits) < filemaxnbcol ) ):\n nbcol=len(fileNameUnits)\n for icol in range (nbcol,filemaxnbcol): fileNameUnits.append('col'+str(icol+1))\n pass\n \n\n\n if fileName:\n self.setName(fileName)\n pass\n if len(fileNameUnits):\n self.setColumnUnits(fileNameUnits)\n\n for i in range(filemaxnbcol):\n if columnsNames.lower()=='yes':\n self.addColumn(effectivecolumnNames[i],allvaluesbycolonne[i])\n pass\n else:\n self.addColumnValues(allvaluesbycolonne[i])\n pass\n return", "def read(self):\n with open(self.filename) as f:\n reader=csv.reader(f)\n for row in reader:\n self.data.appendleft(row)", "def myloadtxt(fname, skiprows = 0):\n fin = file(fname)\n for i in range(skiprows):\n fin.readline()\n ln = fin.readline()\n lns = []\n while (ln != \"\"):\n thisln = []\n ln = ln.strip().split()\n for s in ln:\n try:\n f = float(s)\n except:\n f = None\n thisln.append(f)\n lns.append(thisln)\n ln = fin.readline()\n return np.array(lns)", "def read_data(filepath):\n data = []\n column_names = []\n\n with open(filepath, 'rt') as csvfile:\n data_reader = csv.reader(csvfile, delimiter=',')\n flag = False\n for row in data_reader:\n if not flag:\n column_names = row\n flag = True\n else:\n data.append(row)\n\n return column_names, np.array(data)", "def read_rows(self,rownumber,n_rows,reverse=True):\n raise NotImplementedError('To be implemented')\n\n # go to start of the row\n self.filfile.seek(int(self.datastart+self.channels*rownumber*(int(self.nbits/8))))\n # read data into 2-D numpy array\n data=np.fromfile(self.filfile,count=self.channels*n_rows,dtype=self.dtype).reshape(n_rows, self.channels)\n if reverse:\n data = data[:,::-1]\n return data", "def loadonecol(infile):\n slist = []\n with open(infile) as f:\n for line in f:\n line = line.strip()\n if line: # exclude blank line\n slist.append(line)\n return slist", "def read_data(self, path):\n if self.data_format == 'twenty': \n length = 20\n else: raise ValueError(\"self.data_format = '%s' unknown.\" % \n self.data_format)\n data = []\n with open(path,'r') as f:\n for line in f:\n data.append([float(line[k:(k + length)]) for k in range(\n 0, len(line.strip('\\n')),length)])\n return np.array(data)", "def read_tim(tim_file_name, column=1):\n elements = list()\n with open(tim_file_name,'r') as tim:\n for line in tim:\n line_elements = line.split(' ')\n if len(line_elements)>2 and line_elements[0]=='':\n elements.append(line_elements[column])\n return np.array(elements)", "def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs): # pragma: no cover\n\n df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs)\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index)]", "def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels", "def 
read_metadata_txt(path):\n df = pd.read_csv(path,\n sep='\\s+', # Fields are separated by one or more spaces\n usecols=[0, 1, 2, 3, 4], # Grab only the first 4 columns\n # Missing elevation is noted as -999.9\n na_values=[-999.9],\n header=None,\n names=['station_id', 'latitude', 'longitude', 'elevation', 'state'])\n return df", "def get_column_from_file(file_name, column_number):\n\n file = open(file_name, 'r')\n column_values = []\n for line in file:\n row_values = [int(value.strip()) for value in line.split()]\n column_values.append(row_values[column_number])\n\n return column_values", "def readTab(file_name):\n data = []\n meta = []\n l=0\n for line in open(file_name):\n if l<3:\n meta.append(line.strip(\"\\n\").split(\"\\t\"))\n else:\n if len(line.strip(\"\\n\").split(\"\\t\")) == len(meta[0]):\n data.append(line.strip(\"\\n\").split(\"\\t\"))\n l += 1\n return (meta, data)", "def readTxt(path2file, verbose = True):\n\n if (verbose):\n\t print('Parsing file ', path2file)\n\n ret = pd.read_csv(path2file, sep=';', header = None, names = ['Received', 'ExchTime', 'OrderId', 'Price', 'Amount',\n 'AmountRest', 'DealId', 'DealPrice', 'OI', 'Flags'], skiprows = 3, parse_dates = ['Received', 'ExchTime'], \n date_parser = lambda x: datetime.strptime(x, '%d.%m.%Y %H:%M:%S.%f'),\n converters = {'OrderId': int, 'Price': int, 'Amount': int,'AmountRest': int, 'DealId': int, 'DealPrice': int, 'OI': int, 'Flags': str})\n\n if (verbose):\n print('Finished parsing ', path2file)\n\n return ret", "def _read_csv_col(colNum: int, filename: str) -> List[str]:\n col = []\n with open(filename, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n col.append(str(row[colNum]))\n\n return col[1::] # Ignore the csv header", "def test_too_many_cols1():\n text = dedent(\n \"\"\"\n A B C\n 1 2 3\n 4 5 6\n 7 8 9 10\n 11 12 13\n \"\"\"\n )\n with pytest.raises(InconsistentTableError) as e:\n FastBasic().read(text)\n assert (\n \"Number of header columns (3) inconsistent with data columns in data line 2\"\n in str(e.value)\n )", "def readFromFile(self, inp):\n f = open(inp, \"r\")\n line = f.readline()\n line = line.strip().split(sep=\" \", maxsplit=3)\n self.columns, self.chars, self.pwdLength, _ = line\n self.columns = int(self.columns)\n self.pwdLength = int(self.pwdLength)\n self.func = lmdes\n line = f.readline()\n while line != '':\n pwd, hashV = line.strip().split(sep=\" \", maxsplit=1)\n self.table.insert(hashV, pwd)\n line = f.readline()\n f.close()", "def _read_feather_columns(path, columns, num_splits): # pragma: no cover\n from pyarrow import feather\n\n df = feather.read_feather(path, columns=columns)\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index)]", "def load_data(path):\n # data = pd.read_csv(r'C:\\Users\\Gor\\Desktop\\ALL.txt', header=None, sep=';')\n data = pd.read_csv(path, header=None, sep=',')\n # data.columns = ['sum', '1', '2', '3', '4', '5', 'V_all', 'V1', 'V2', 'V3', 'V4', 'V5']\n # data = data.drop(['sum', 'V_all', 'V1', 'V2', 'V3', 'V4', 'V5'], axis=1)\n # data.columns = ['V_all', 'sum', '1', '2', '3', '4', '5']\n data.columns = ['1', '2', '3', '4', '5']\n # data = data.drop(['sum', 'V_all', 'V1', 'V2', 'V3', 'V4', 'V5'], axis=1)\n # data = data[:10000]\n # data = data.drop_duplicates()\n\n return data[['1', '2', '3', '4', '5']]", "def parseFileInput(data: List[str]):\n rowsAndCols = data[0].strip().split(' ')\n rows = int(rowsAndCols[0])\n cols = int(rowsAndCols[1])\n\n res = ''\n for 
line in data[1:]:\n for item in line.strip().split(' '):\n res += item\n\n return res, [rows, cols]", "def read_data(self,filename):\n self.x = [] #Input values\n self.t = [] #Target values\n\n with open(filename, \"r\") as infile:\n lines = infile.readlines()\n self.n = len(lines)\n for line in lines:\n words = line.split()\n self.x.append(float(words[0]))\n self.t.append(float(words[1]))\n\n self.x = np.array(self.x)\n self.t = np.array(self.t)\n self.create_design_matrix()", "def read_file(file):\n text = []\n with open(file, newline='') as f:\n reader = csv.reader(f)\n next(reader, None) # skip header row\n for row in reader:\n text.append(row)\n return text", "def read_infile(infile):\n # There are a variable header lengths possible.\n # Loop through and look for when the line starts\n # with '1', the first index.\n nheader = 0\n try:\n with open(infile, 'r') as f:\n for line in f:\n if line.strip().startswith('1'):\n break\n nheader += 1\n except IOError:\n message = f'Unable to open {infile} in modconvert.'\n raise PipeCalError(message)\n index, freq, tbr, flux, trj = np.genfromtxt(infile, unpack=True,\n skip_header=nheader)\n return index, freq, tbr, flux, trj", "def padread(filename, columns=4, out_dtype=np.float32):\n with open(filename, \"rb\") as f: \n A = np.fromfile(f, dtype=np.float32) # accel file: 32-bit float \"singles\"\n B = np.reshape(A, (-1, columns))\n if B.dtype == out_dtype:\n return B\n return B.astype(out_dtype)", "def load_data(path, file, verbose=False, index=0):\n \n df = pd.read_csv(path+file, index_col=index)\n \n if verbose:\n shape = f'{df.shape}'\n dtypes = f'{df.dtypes[:30]}'\n head = f'{df.head()[:10]}'\n name = file.split('.')[0]\n \n print(f'{name} shape'.center(80, '-'))\n print(shape.center(80))\n print(f\"{name}'s column types\".center(80, '-'))\n print(dtypes)\n print(f\"{name} first five rows\".center(80, '-'))\n print(head)\n \n return df", "def loadresultsfile(fname, colname):\n \n f = open(fname)\n a = f.readline().split()\n f.close()\n b = []\n #Specifically imports the data under the \"Mean1\" column.\n for x in a: \n b.append(x.find(colname))\n col = b.index(0) + 1\n return(np.loadtxt(fname, skiprows=1, usecols=(col,)))", "def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data", "def read_text_row(fnam, format=\"\", skip=\";\"):\n\tfrom string import split\n\n\tinf = file(fnam, \"r\")\n\tstrg = inf.readline()\n\tx = []\n\tdata = []\n\twhile (len(strg) > 0):\n\t\tcom_line = False\n\t\tfor j in xrange(len(strg)):\n\t\t\tif(strg[j] == skip):\tcom_line = True\n\t\tif com_line == False:\n\t\t\tword=split(strg)\n\t\t\tif format == \"s\" :\n\t\t\t\tkey = int(word[1])\n\t\t\t\tif key != len(word) - 2:\n\t\t\t\t\tdel word\n\t\t\t\t\tword = []\n\t\t\t\t\tword.append(strg[0 : 5])\n\t\t\t\t\tword.append(strg[6 : 7])\n\t\t\t\t\tfor k in xrange(key):\n\t\t\t\t\t\tk_start = 7 + k*13\n\t\t\t\t\t\tk_stop = k_start + 13\n\t\t\t\t\t\tword.append(strg[k_start : k_stop])\t\t\t\t\n\t\t\tline=[]\n\t\t\tfor i in xrange(len(word)):\n\t\t\t\tline.append(float(word[i]))\n\t\t\tdata.append(line)\n\t\tstrg=inf.readline()\n\tinf.close\n\treturn data", "def read_dataframe(file_name, columns):\n df = pd.read_csv(file_name, sep = \",\")\n return df[columns]", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def read_file(filepath):\n\tfile = open(filepath, 'r',encoding = \"utf-8\")\n\tdata = file.readlines()\n\tdata_list = []\n\tfor i in range(len(data)):\n\t\tif i 
!= 0:\n\t\t\tdata_list.append(data[i])\n\tnum_list = []\n\tword_list = []\n\tfor l in data_list:\n\t\tif l != '\\n':\n\t\t\tentry = l.split('\\t')\n\t\t\tnum_list.append(int(entry[0]))\n\t\t\tword_list.append(entry[1][:-1])\n\treturn num_list,word_list", "def get_csv_column(file_name, column):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n for x in infile.readlines():\n x = x.replace('\\n', '')\n # splitting based on ',' that are encountered in csv files.\n #column-1 because the range start from 0 , so if user enters 1st column then its 0th column we need to fetch\n list.append(x.split(',')[column - 1])\n return list", "def _parse_textfile(self):\n\n field_names = list(self.FIELD_NAME_TO_INDEX.keys())\n field_indices = list(self.FIELD_NAME_TO_INDEX.values())\n frame = pd.read_csv(\n self.filepath,\n header=None, # MAGIC file has no header line\n delimiter=self.DELIMITER,\n usecols=field_indices,\n names=field_names,\n converters=self.FIELD_CONVERTERS,\n )\n return frame", "def test_many_columns(parallel, read_basic):\n # create a string with 500 columns and two data rows\n text = \" \".join([str(i) for i in range(500)])\n text += \"\\n\" + text + \"\\n\" + text\n table = read_basic(text, parallel=parallel)\n expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])\n assert_table_equal(table, expected)", "def load_data_file(data_file):\n print(\"Loading from {} ...\".format(data_file.name), end=\"\")\n text_col = \"news_title\"\n theme1_col = \"Q3 Theme1\"\n\n with open(data_file) as f:\n df = pd.read_csv(f, sep=\"\\t\")\n X = df[text_col].tolist()\n y = None\n if theme1_col in df.columns:\n y = df[theme1_col].tolist()\n\n print(\n \"loaded {} lines {} labels ... done\".format(\n len(X), \"with\" if y is not None else \"without\"\n )\n )\n\n print(len(X))\n print(len(y))\n return (X, y)", "def read_file(self):\n\n\t\twith open(self.filename, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif len(line)>1:\n\t\t\t\t\tlenght_value,array_values = line.split(';')\n\t\t\t\t\tlist_values = [int(x) for x in array_values.split(',')]\n\t\t\t\t\tprint self.get_arraysurdit(list_values)", "def import_data(fname,read='row',samples=None,col_num=None):\n assert isinstance(fname,str)\n assert (isinstance(samples,int) and samples>0) or samples is None\n assert read=='row' or (read=='col' and isinstance(col_num,int) and col_num>=0)\n \n #import CSV file\n \n f=open(fname,'r',newline='')\n reader=csv.reader(f)\n header=next(reader)\n data=[]\n \n #Format database, since CSV is all strings. 
If data empty or formatted incorrectly, then return None.\n \n #Row import\n if read=='row':\n if isinstance(samples,int): #Import sample\n for j in range(samples):\n row=next(reader)\n data.append(format_row(row))\n elif samples is None: #Import all\n for row in reader:\n data.append(format_row(row))\n else:\n raise 'samples variable error'\n \n #Column import\n if read=='col':\n if isinstance(samples,int): #Import sample\n for j in range(samples):\n row=next(reader)\n data.append(format_column(row[col_num],col_num))\n elif samples is None: #Import all\n for row in reader:\n data.append(format_column(row[col_num],col_num))\n else:\n raise 'samples variable error'\n \n return header,data", "def read_datafile( path, seperator='\\t' ):\n data = []\n dataFile = open(path,'r')\n \n for line in dataFile:\n line = line.strip() # Strip line of whitespaces\n values = line.split(seperator) # Split line into seperate values\n \n for i in range( len(values) ):\n try:\n if values[i] != '':\n data[i].append(float(values[i])) # Try to add values to corresponding dimention\n except IndexError:\n data.append( [] ) # Add Dimension if append breaks\n data[i].append( float(values[i]) ) # Append the Value again \n\n dataFile.close()\n\n return data" ]
[ "0.69048285", "0.6310644", "0.6242134", "0.61760336", "0.6040689", "0.60290545", "0.6026688", "0.60042155", "0.587215", "0.58236593", "0.5762683", "0.5731496", "0.5718888", "0.5706086", "0.5696076", "0.5676739", "0.56515324", "0.56390595", "0.56192374", "0.55811644", "0.55722106", "0.5538924", "0.55220044", "0.54926354", "0.5447086", "0.5438206", "0.5424468", "0.5422595", "0.5421211", "0.5408198", "0.5396657", "0.53851765", "0.53729117", "0.53285116", "0.53181267", "0.5308915", "0.53074443", "0.5306924", "0.52970535", "0.52688557", "0.5261508", "0.5258321", "0.5258321", "0.5246812", "0.52280873", "0.52261126", "0.5225957", "0.52100796", "0.51970226", "0.5190193", "0.5182998", "0.5177538", "0.51699317", "0.516911", "0.5166589", "0.5166561", "0.5166539", "0.515993", "0.5154618", "0.5154261", "0.51532996", "0.5144679", "0.5144253", "0.51422644", "0.51392007", "0.5136354", "0.5136258", "0.5135492", "0.5126366", "0.51129353", "0.5111668", "0.511158", "0.50966585", "0.5096567", "0.5094518", "0.50843775", "0.5077633", "0.5074066", "0.50676197", "0.50432724", "0.50323844", "0.5021214", "0.50191325", "0.50153565", "0.5009057", "0.5007624", "0.5007023", "0.50034493", "0.5001011", "0.49986404", "0.49960828", "0.4989809", "0.49819833", "0.49803063", "0.49788544", "0.49783498", "0.4977919", "0.49419537", "0.49408382", "0.49372944" ]
0.73864484
0
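Several of the negative passages in the record above share one pattern: open a delimited text file, skip blank and comment lines, and pull one column of numbers out of it. A minimal sketch of that pattern in modern Python is given here for orientation only; it is not part of the dataset, and the helper name read_column and any file names are assumptions.

# Illustrative sketch, not a dataset entry: read one column of floats from a
# whitespace- or comma-delimited text file, skipping blank and '#' comment lines.
def read_column(path, col, delimiter=None):
    values = []
    with open(path, "r") as fh:
        for line in fh:
            parts = line.split(delimiter)
            if not parts or parts[0].startswith("#"):
                continue  # blank line or comment line
            values.append(float(parts[col]))
    return values

# Hypothetical usage: second whitespace-separated column of "data.txt"
# col1 = read_column("data.txt", 1)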
Write to an ASCII file a list of lists containing floats.
def write_text_file(data, file_name): import types outf = open(file_name, "w") if (type(data[0]) == types.ListType): # It is a list of lists for i in xrange(len(data[0])): for j in xrange(len(data)): if type(data[j][i]) == type(0): outf.write(" %12d"%data[j][i]) else: outf.write(" %12.5g"%data[j][i]) outf.write("\n") else: # Single list for j in xrange(len(data)): if type(data[j]) == type(0): outf.write(" %12d\n"%data[j]) else: outf.write(" %12.5g\n"%data[j]) outf.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeFloatListToFile(ldata, prec, filePath):\n\twith open(filePath, \"w\") as fh:\n\t\tfor d in ldata:\n\t\t\tfh.write(formatFloat(prec, d) + \"\\n\")", "def write_float32_list(self, float_list: List[float]) -> None:\n self.write_int32(len(float_list))\n for f in float_list:\n self.write_float32(f)", "def write(lst):\n # TODO", "def write_float_array(f, path, values, dtype='f8'):\n dset = f.create_dataset(path, (len(values),), dtype=dtype)\n dset[:] = values\n f.flush()", "def savealist(alist, filename):\n out = open(filename, \"w\")\n for i in alist:\n out.write(str(i) + \"\\n\") # if i is numeric\n out.close()", "def write_list(self):\n with open(self.path, 'w') as file:\n for i in map(self.addziros, range(1, int(str(1) + self.number_length * '0') + 1)):\n file.write(i + '\\n')\n file.close()", "def save_lists_to_file(filename, elev_list, dist_list):\n import numpy as np\n\n np.save(file=filename,arr=np.array([elev_list, dist_list]))", "def write_coordinates(coordinate_lst, file):\n\n for c in coordinate_lst:\n file.write(\"\\t\\t\\t\" + str(c[0]) + \",\" + str(c[1]) + \",\" + str(c[2]) + '\\n')", "def insert_floats(self, numbers, location=None, overwrite=False):\n pass\n # def flatten(l): return [x for sublist in l for x in sublist]\n # def tobytes(x): return list(x.to_bytes(size, byteorder=self._byteorder))\n #\n # bytes_to_write = flatten([tobytes(x) for x in numbers])\n # return self.insert_bytes(bytes_to_write, location, overwrite)", "def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)", "def save_list(lines, filename):\n data = '\\n'.join(lines)\n file = open(filename, 'w')\n file.write(data)\n file.close()", "def SaveListFile(file,lst):\n\tlst = [str(i) +\"\\n\" for i in lst]\n\tif len(lst) == 0:\n\t\treturn\n\twith open(file,'w') as f:\n\t\tf.writelines(lst)\n\treturn lst", "def write_list_to_file(myList, filename):\r\n\r\n with open(filename, \"w\") as outfile:\r\n for entries in myList:\r\n outfile.write(entries)\r\n\t\t\t# add a return after each line\r\n outfile.write(\"\\n\")", "def save_list_to_file(the_list, filepath):\n with open(filepath, 'w') as file_handler:\n for item in the_list:\n file_handler.write(\"{}\\n\".format(item))", "def write_flt_file(filename, data, dsize):\n binfile = open(filename,'wb')\n\n dsize = numpy.array(dsize)\n dsize[-1] = data.shape[0]\n\n header = [len(dsize)] # dimension\n header.extend(list(dsize)) # size\n header.append(4) # data type: float\n header.append(dsize.prod()) # total length of data\n\n a = array.array('i')\n a.fromlist(header)\n if is_little_endian():\n a.byteswap()\n\n a.tofile(binfile)\n\n a = array.array('f')\n for o in data:\n a.fromlist(list(o))\n if is_little_endian():\n a.byteswap()\n a.tofile(binfile)\n binfile.close()", "def write_list(l, fname):\n thefile = open(fname, \"w\")\n for line in l:\n thefile.write(\"%s\\n\" % line)\n thefile.close()", "def io_write_read_1dlist_txt():\n a = [1, 2, 3, 4, 5]\n with open('1d.txt', 
'w') as f:\n for item in a:\n f.write(f'{item}\\t')\n\n with open('1d.txt', 'r') as f:\n line = f.readline()\n b = line.strip().split('\\t')\n b = [int(item) for item in b]\n print(b)\n\n ## Output\n # [1, 2, 3, 4, 5]\n\n ## Notes\n # with statement\n # a convenient method for indicating a particular operation has some cleanup associated with it,\n # and to guarantee that cleanup happens, no matter what\n\n # list comprehension\n # List comprehensions provide a concise way to create lists.", "def create_data_file_from_list(lst, out_filename, dtype, shape):\n with open(out_filename, 'wb+') as out_file:\n out_file = open(out_filename, 'wb+')\n dat_file = np.memmap(out_file, dtype=dtype, shape=shape)\n dat_file[:] = lst[:]\n dat_file.flush()\n size = float(dat_file.nbytes) / (1024 ** 2)\n print('written %s : %.3f MB' % (out_filename, size))", "def save(self, filename):\n np.savetxt(\n filename,\n np.array([np.array(list(e)) for e in self]),\n fmt=\"%.03f\",\n delimiter=\"\\t\",\n )", "def write_list_to_file(file_name: str, list_name: List[str]):\n # Write to a file, overwriting the old contents\n file = open(file_name, 'w')\n\n # Loop through the list, append a newline character to each line\n for item in list_name:\n file.writelines(item + '\\n')\n\n # Close the file\n file.close()", "def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)", "def read_floats(filepointer):\n\tdata = read_strings(filepointer)\n\tif not data:\n\t\treturn None\n\ttry:\n\t\tdata = [float(x) for x in data]\n\t\treturn data\n\texcept:\n\t\t# try the next line\n\t\treturn read_floats(filepointer)", "def writeData(fname,data):\n with open(fname,\"w\",newline=\"\") as fo:\n wr = csv.writer(fo)\n wr.writerow([\"x\"]+[\"Series {}\".format(i+1) for i in range(len(data))])\n # just in case things are of different lengths\n n = max([len(d) for d in data])\n for i in range(n):\n lst = [i]\n for d in data:\n try:\n val = d[i]\n except IndexError:\n val = 0\n lst.append(val)\n wr.writerow(lst)", "def list_to_file(l, file_name):\r\n fw = open(file_name, 'w', encoding = 'utf-8')\r\n fw.write('\\n'.join(l))\r\n fw.close()", "def write_lines(list_of_lines, file):\r\n for i in range(0, len(list_of_lines)):\r\n file.write(list_of_lines[i] + b\"\\n\")", "def persist_list_to_csv(liste, nom_fichier):\n with open(nom_fichier, 'w') as f:\n for elem in liste :\n f.write(\"{}\\n\".format(elem))", "def dump(points, filename):\n with open(filename, 'w') as f:\n for i, pts in enumerate(points):\n for x, y in pts:\n f.write(f\"{x:.3f},{y:.3f},{i}\\n\")\n print(f\"Dumping data to {filename}...\")", "def write_float(self, f: float) -> None:\n self.write(STRUCT_FLOAT.pack(f))", "def write_string_list_to_file(string_list, filename):\n with open(filename, 'w') as f:\n for element in string_list:\n f.write(element+'\\n')", "def write_text_row(data, file_name):\n\timport types\n\toutf = open(file_name, \"w\")\n\tif (type(data[0]) == types.ListType):\n\t\t# It is a list of lists\n\t\tfor i in xrange(len(data)):\n\t\t\tfor j in xrange(len(data[i])):\n\t\t\t\toutf.write(\" %12.5g\"%data[i][j])\n\t\t\toutf.write(\"\\n\")\n\telse:\n\t\t# Single list\n\t\tfor j in xrange(len(data)):\n\t\t\toutf.write(\" %12.5g\"%data[j])\n\t\toutf.write(\" \\n\")\n\toutf.close()", "def SaveList(list_variable, strfile, separator=','):\n\n robomath.Mat(list_variable).tr().SaveMat(strfile, separator)", "def write_into_file(name, liste):\n file = open(name, \"w\")\n 
for item in liste:\n file.write(item)\n file.write('\\n')\n file.close()", "def save_list(list_data, path, lineterminator='\\n', encoding=None, mode='w'):\n with open(path, mode) as f:\n list_data = [item + lineterminator for item in list_data]\n if encoding is not None:\n list_data = [item.encode(encoding) for item in list_data]\n\n f.writelines(list_data)", "def to_floats(lst):\n vals = []\n for arg in lst:\n vals.append( float(arg) )\n return vals", "def save_list_of_list(data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n if encoding is not None:\n data = [[item.encoding(encoding) for item in items]\n for items in data]\n writer.writerows(data)", "def writedata(filename, inputs, outputs):#=None, noutput=1):\n\t#if outputs == None:\n\t#\toutputs = np.zeros(shape=(np.shape(inputs)[0], noutput))\n\tassert np.shape(inputs)[0] == np.shape(outputs)[0]\n\tndp = np.shape(inputs)[0]\n\tnin = np.shape(inputs)[1]\n\tnout = np.shape(outputs)[1]\n\tf = open(filename, \"w\")\n\tf.write(\"%i %i %i\\n\" % (ndp, nin, nout))\n\tfor inputline, outputline in zip(inputs, outputs):\n\t\tf.write(\" \".join([\"%.6f\" % item for item in inputline]) + \"\\n\")\n\t\tf.write(\" \".join([\"%.6f\" % item for item in outputline]) + \"\\n\")\n\tf.close()\n\tlogger.debug(\"Wrote %s\\n(datapoints : %i, inputs : %i, outputs : %i)\" % (filename, ndp,\n\t\t\t\t\t\t\t\t\t\t nin, nout))", "def f2c_file_read_write_function():\n with open('Fdeg.dat', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n data = data[3:] # get lines with numerical values only\n\n F_list = [float(line[-1]) for line in data]\n C_list = [5/9.0*F - 32 for F in F_list]\n\n for i in range(len(C_list)):\n print(\"{:6g}F {:10.2f}C\".format(F_list[i], C_list[i]))\n\n return F_list", "def write_vec(f, vec, name, vec_type):\n f.write('%s %s[%d] = {\\n' % (vec_type, name, len(vec)))\n\n # Write vector elements\n for i in range(len(vec)):\n if vec_type == 'c_float':\n f.write('(c_float)%.20f,\\n' % vec[i])\n else:\n f.write('%i,\\n' % vec[i])\n\n f.write('};\\n')", "def writeCSV(list, filename):\n with open(filename, \"w\") as file:\n for row in list:\n for i in range(len(row)):\n file.write(str(row[i]))\n if i != len(row) - 1:\n file.write(\",\")\n else:\n file.write(\"\\n\")\n return", "def write_lis_lis(lis_lis,filename,cols=[]):\n lis_lis = [[str(l) for l in lis]\n for lis in lis_lis] # trans every element to str\n #make all inner lists of the same length\n inner_lis_max_len = max(len(lis) for lis in lis_lis)\n lis_lis = [lis + (inner_lis_max_len - len(lis)) * [''] for lis in lis_lis]\n #make element in the same list have the same length\n aligned = []\n for lis in lis_lis:\n width = max([len(l) for l in lis])\n lis = [l + (width - len(l)) * ' ' for l in lis]\n aligned.append(lis)\n new_lis_lis = [';'.join([aligned[i][j] for i in range(len(aligned))]) for j in range(len(aligned[0]))]\n with open(filename+'.txt','w') as w_f:\n if cols:\n print >> w_f,'\\t;'.join(cols)\n for l in new_lis_lis:\n print >> w_f,l", "def save_list_to_file(content: list, dst_path: str, append=False) -> None:\n with io.open(file=dst_path, mode=\"a\" if append else \"w\", encoding='utf-8') as destination_file:\n for element in content:\n destination_file.write(element + \"\\n\")", "def write_fortran(v, filename):\n n = np.array([4*len(v)], dtype='int32')\n v = np.array(v, dtype='float32')\n\n with open(filename, 'wb') as file:\n n.tofile(file)\n v.tofile(file)\n 
n.tofile(file)", "def write(fp, *data):\n from itertools import chain\n output = []\n header = []\n ntrials = None\n\n header.append(len(data)) # number of units\n\n ptr = 3 + len(data) # first data entry\n for unit in data:\n if ntrials is None:\n ntrials = len(unit)\n header.append(ntrials)\n elif ntrials != len(unit):\n raise ValueError(\"Each unit must have the same number of repeats\")\n header.append(ptr + len(output))\n output.extend(len(trial) for trial in unit)\n for trial in unit:\n output.extend(trial)\n\n for val in chain(header,output):\n fp.write(\"%r\\n\" % val)", "def write_float(self, f):\n if not isinstance(f, float):\n raise TypeError(\"expected a float, got %r\" % (type(f),))\n\n self.write(self._packers[\"f\"].pack(f))", "def save_double_list(list1, list2, filename):\r\n the_file = open(filename, \"wb\")\r\n try:\r\n writer = csv.writer(the_file)\r\n if len(list1)!=len(list2):\r\n raise Exception(\"Saving a double list : The list have not the same length !\")\r\n for i in range(len(list1)):\r\n writer.writerow( (list1[i], list2[i]) ) \r\n finally:\r\n the_file.close()", "def write(self, data, filename):\n id_ = 1\n weightlist_el = Element('weight-list')\n for dataset in data:\n weight_el = SubElement(weightlist_el, 'weight')\n id_el = SubElement(weight_el, 'id')\n id_el.text = str(id_)\n date_el = SubElement(weight_el, 'date')\n date_el.text = str(dataset.date) + 'T12:00:00'\n value_el = SubElement(weight_el, 'value')\n value_el.text = str(dataset.weight)\n comment_el = SubElement(weight_el, 'comment')\n comment_el.text = dataset.note\n id_ += 1\n st_tree = ElementTree(weightlist_el)\n st_tree.write(filename, encoding='UTF-8')", "def get_float_list(gene_file, c):\n\tfile = open(gene_file,'r')\n\tList = []\n\tfor line in file:\n\t\tif not re.match(\"#\", line):\n\t\t\tline = line.strip()\n\t\t\tsline = line.split()\n\t\t\tList.append(atof(sline[c]))\n\tfile.close()\n\treturn List", "def writeList2File(filename, array, overwrite=False, separator=';'):\n mode = 'a'\n if overwrite:\n mode = 'w'\n file = open(filename, mode)\n file.write(separator.join(map(str,array)) + '\\n')", "def write_double(self, f: float) -> None:\n self.write(STRUCT_DOUBLE.pack(f))", "def write_list(outputfilename, list):\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)\r\n except:\r\n input(\"File still open! 
Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)", "def writeStrListToFile(ldata, filePath, delem=\",\"):\n\twith open(filePath, \"w\") as fh:\n\t\tfor r in ldata:\n\t\t\tif type(r) == list:\n\t\t\t\tr = delem.join(r)\n\t\t\tfh.write(r + \"\\n\")", "def write_list(args, file_list):\n if not args.listfile.endswith(\".txt\"):\n args.listfile += \".txt\"\n outputfile = open(args.listfile, 'w')\n for name in file_list:\n outputfile.write(name)\n outputfile.write(\"\\n\")\n outputfile.close()", "def write_data_to_file(pos, fps, data_file):\n xs = []\n for x,y in pos:\n xs.append(x)\n with open(data_file,'wb') as f:\n np.save(f,pos)\n np.save(f,xs)\n np.save(f,fps)", "def save2file(lis, path):\r\n np.save(path, np.array(lis))", "def cut_data(data):\n out = [[], []]\n data = data.split(\"\\n\")\n for line in data:\n line = line.split(\" \")\n line = remove_empty(line)\n try:\n out[0].append(float(line[0]))\n out[1].append(float(line[1]))\n except IndexError:\n pass\n file = open(\"test.txt\", \"w\")\n for i in out[1]: # DELETE\n file.write(str(i))\n file.write(\"\\n\")\n file.close()\n return out", "def save_data_to_file(file_name, list_of_product_objects):\r\n objfile = open(file_name, 'w')\r\n for row in list_of_product_objects:\r\n objfile.write(row.product_name + \",\" + str(row.product_price) + \"\\n\")\r\n objfile.close()", "def write_list(self, register, data):\n raise NotImplementedError", "def csvwrite(inlist, stringify=False):\n out_list = []\n for entry in inlist:\n if stringify:\n new_entry = []\n for val in entry:\n if not isinstance(val, basestring):\n val = str(val)\n new_entry.append(val)\n entry = new_entry\n this_line = ', '.join([elem_quote(val) for val in entry])\n out_list.append(this_line)\n return out_list", "def writecc (listoflists,file,writetype='w',extra=2):\r\n if type(listoflists[0]) not in [ListType,TupleType]:\r\n listoflists = [listoflists]\r\n outfile = open(file,writetype)\r\n rowstokill = []\r\n list2print = copy.deepcopy(listoflists)\r\n for i in range(len(listoflists)):\r\n if listoflists[i] == ['\\n'] or listoflists[i]=='\\n' or listoflists[i]=='dashes':\r\n rowstokill = rowstokill + [i]\r\n rowstokill.reverse()\r\n for row in rowstokill:\r\n del list2print[row]\r\n maxsize = [0]*len(list2print[0])\r\n for col in range(len(list2print[0])):\r\n items = pstats.colex(list2print,col)\r\n items = map(pstats.makestr,items)\r\n maxsize[col] = max(map(len,items)) + extra\r\n for row in listoflists:\r\n if row == ['\\n'] or row == '\\n':\r\n outfile.write('\\n')\r\n elif row == ['dashes'] or row == 'dashes':\r\n dashes = [0]*len(maxsize)\r\n for j in range(len(maxsize)):\r\n dashes[j] = '-'*(maxsize[j]-2)\r\n outfile.write(pstats.lineincustcols(dashes,maxsize))\r\n else:\r\n outfile.write(pstats.lineincustcols(row,maxsize))\r\n outfile.write('\\n')\r\n outfile.close()\r\n return None", "def write_csv(fname, olist):\n ofile = open(fname, \"wb\")\n writer = csv.writer(ofile, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_ALL)\n writer.writerows(olist)", "def write_vector(vector, outfile):\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n vector = vector.copy()\n for k in vector:\n if isinstance(vector[k], np.ndarray):\n vector[k] = vector[k].round(4).tolist()\n with open(outfile, 'w') as f:\n json.dump(vector, f, separators=(',', ': '), 
indent=4)\n f.write('\\n')\n\n print(\" ... wrote {}\".format(outfile))", "def write_CSV_data(fname, names, npts, nvar, append, data):\n \n if append > 0:\n f = open(fname,'a')\n else:\n f = open(fname,'w')\n for nm in names:\n f.write(nm+',')\n f.write('\\n')\n for j in range(npts):\n for n in range(nvar):\n f.write('%10.4e, ' % data.value(j,n))\n f.write('\\n')\n f.close()", "def write_vector(vector, outfile):\r\n out_dir = os.path.dirname(outfile)\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n vector = vector.copy()\r\n for k in vector:\r\n if isinstance(vector[k], np.ndarray):\r\n vector[k] = vector[k].round(4).tolist()\r\n with open(outfile, 'w') as f:\r\n json.dump(vector, f)\r\n f.write('\\n')\r\n\r\n print(\" ... wrote {}\".format(outfile))", "def write_list_to_file(ls, save_path):\n # Open in appendation mode given that this function may be called multiple\n # times on the same file (positive and negative sentiment are in separate\n # directories).\n out_file = open(save_path, \"w+\")\n for example in ls:\n out_file.write(example)\n out_file.write('\\n')", "def print_to_file(list_of_lines, file_path):\r\n with open(file_path) as output_file:\r\n write_lines(list_of_lines, output_file)", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def list_to_file(sorted_list, filename):\n doc = Document()\n table = doc.add_table(rows=1, cols=2)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Word'\n hdr_cells[1].text = 'Occurrence'\n\n for key, value in sorted_list:\n row_cells = table.add_row().cells\n row_cells[0].text = key\n row_cells[1].text = str(value)\n\n doc.save(\"sorted - \" + filename)", "def make_list(filename):\n # with open(filename) as f:\n pois = []\n file = open(filename, 'r')\n for line in file:\n ls = line.split(\",\")\n entry = ((float(ls[1]), float(ls[2])), int(ls[3]))\n pois.append(entry)\n return pois", "def floats(self) -> List[NumericType]:\n return [float(v) for v in self._record]", "def write_sequence(list):\n pass", "def save_data_to_file(file_name, list_of_product_objects):\r\n try:\r\n objF = open(file_name, \"w\")\r\n for row in list_of_product_objects:\r\n objF.write(str(row[0]) + \",\" + str(row[1]) + \"\\n\")\r\n objF.close()\r\n except IOError:\r\n print(\"Unable to locate file\")", "def writeArray(fname,arr):\n fh = open(fname,'w')\n fh.write('%d\\n' % arr.shape[0])\n fh.write('%d\\n' % arr.shape[1])\n for x in range(arr.shape[0]):\n for y in range(arr.shape[1]):\n if arr.dtype == np.complex:\n fh.write('%.7e %.7e\\n' % (arr[x,y].real, arr[x,y].imag))\n else:\n fh.write('%.7e\\n' % (arr[x,y]))\n fh.close()", "def writeFloat(self, value: float):\n self._pack('!f', value)", "def all_measurements_to_list(ms, filename='n'):\n m_list = []\n for el in ms:\n # print(\"def all_measurements_to_list 010: \", el)\n for k, v in el.items():\n # print(\"def all_measurements_to_list 020: \", k, v)\n for meas in v:\n m_list.append(k + ',' + meas['name'])\n # print(\"def all_measurements_to_list 030: \", k + ' : ' + meas['name'])\n # print(m_list)\n # 
print(\"def all_measurements_to_list 040: length measurement list: \", len(m_list))\n #\n if filename != 'n':\n mlist = open(filename, 'w')\n for el in m_list:\n mlist.writelines(el + '\\n')\n mlist.close()\n # print(\"def all_measurements_to_list 050: list saved as: \", filename)\n else:\n # print(\"def all_measurements_to_list 051: list not saved as file.\")\n pass\n return m_list", "def write_nested_string_list_to_file(string_list, filename):\n with open(filename, 'w') as f:\n for i in range(0,len(string_list)):\n for element in string_list[i]:\n f.write(element+'\\t'+str(i)+'\\n')", "def VtFloat(list):\n return win32com.client.VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R8, list)", "def save_gps_coordinates(points: list, file_name: str):\n\n with open(file_name, \"w\") as file:\n for point in points:\n if isinstance(point[0], list):\n str_point = str(point[0][0]) + \" \" + \\\n str(point[0][1]) + \" \" + str(point[1]) + \"\\n\"\n else:\n str_point = str(point[0]) + \" \" + str(point[1]) + \"\\n\"\n file.write(str_point)", "def sed_write_prob_mat_list_to_csv(na_list, prob_mat_list, out_path):\n f = gzip.open(out_path, 'w')\n for n in range(len(na_list)):\n na = na_list[n]\n prob_mat = prob_mat_list[n]\n (n_time, n_lb) = prob_mat.shape\n for i2 in xrange(n_time):\n f.write(na)\n for i3 in xrange(n_lb):\n f.write(\"\\t%.4f\" % prob_mat[i2, i3])\n f.write(\"\\n\")\n f.close()", "def toFloatList(values):\n\treturn list(map(lambda va: float(va), values))", "def writeCSV(path,aList):\n\twith open(path,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(aList)\n\tw.close()", "def write_float32(self, f: float) -> None:\n self.buffer += struct.pack(\"<f\", f)", "def write(writer: BitStreamWriter, value: float) -> None:\n\n writer.writeFloat64(value)", "def csv_writelist(file, oldfile, chlst, num):\n import csv\n writelist = checkdifferences(oldfile, chlst, num)\n print('before', writelist)\n with open('{}.csv'.format(file), 'w', newline='') as csvwrite:\n writer = csv.writer(csvwrite, delimiter=';')\n try:\n for eachrow in writelist:\n writer.writerow(eachrow)\n except:\n if TypeError:\n print('Typeerror')\n csvwrite.close()", "def write_list_to_file(input_list, output_folder, delimiter=\" \", header=None):\n with open(output_folder, 'w') as doc_out:\n if header:\n doc_out.write(delimiter.join(header) + \"\\n\")\n for element in input_list:\n doc_out.write(delimiter.join([str(i) for i in element]) + \"\\n\")", "def __type_of_elements_correct_floats_in_list(self):\n strTestName = 'Float elements in a list (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'type \\'tuple\\' parameter')\n RxCSObject.paramType('parameter1', (tuple))\n RxCSObject.parameter1 = (1, 4)\n\n RxCSObject.paramAddMan('parameter2', 'type \\'tuple or list\\' parameter')\n RxCSObject.paramType('parameter2', (tuple, list))\n RxCSObject.parameter2 = [4, 5, 9]\n\n RxCSObject.paramAddMan('parameter3', 'type \\'list\\' parameter')\n RxCSObject.paramType('parameter3', (list))\n RxCSObject.paramTypeEl('parameter3', (float))\n RxCSObject.parameter3 = [4.1, 5.3, 9.0]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def export_ascii(filename, data, lats, lons):\n ascw = open(filename+\".asc\", \"w\")\n ascw.write(\"\"\"ncols %d\nnrows %d\nxllcenter %.2f\nyllcenter %.2f\ncellsize %.2f\nNODATA_value -9999\"\"\" % (\n len(lons), len(lats),\n lons[0], lats[0],\n lons[1] - lons[0]))\n for i in reversed(range(0, data.shape[0])):\n ascw.write(\"\\n\")\n for j in range(0, 
data.shape[1]):\n x, y = \"%.2f\" % lons[j], \"%.2f\" % lats[i]\n if j > 0:\n ascw.write(\" \")\n ascw.write(\"%.6f\" % data[i, j])\n ascw.close()", "def add_to_csv(file_name, single_list):\n final_list = read_csv(file_name)\n writer = csv.writer(open(file_name, 'wb'), delimiter=',',quoting=csv.QUOTE_MINIMAL)\n final_list.append(single_list)\n for x in final_list:\n writer.writerow(x)", "def write_to_file(output, data, datafields):\n if (len(data) != len(datafields)):\n print \"Error! number of data fields != number of headers!\"\n print 'len: ', len(data), len(datafields)\n print 'shape: ', np.shape(data), np.shape(datafields)\n\n ofile = open(output,'w')\n ofile.write(\"# g(r) in the xy-plane from 2Drdf.py\\n\")\n header = \"# chunk \"\n for element in datafields:\n header += element + \" \"\n\n header = header + '\\n'\n ofile.write(header)\n \n it = 0\n for i in xrange(len(data[0])):\n line = str(it) + \" \"\n it += 1\n for j in xrange(len(data)):\n line += str(float(data[j][i])) + \" \"\n line += \"\\n\"\n ofile.write(line)\n \n ofile.close()\n print \"Finished writing file: \", output", "def save_gps_coordinates_raw(points: list, file_name: str):\n\n with open(file_name, \"w\") as file:\n for point in points:\n file.write(str(point) + \"\\n\")", "def write(self, fname, headers=False, labels=True):\n strlist = self.string_lines(headers=headers, labels=labels)\n \n \n if not hasattr(fname, 'writelines'):\n with open(fname, 'w') as f:\n f.writelines(strlist)\n else:\n fname.writelines(strlist)\n return", "def write_list(self, data, delimiter=\"\\n\"):\n if self.check_list_exists(data):\n with opened_w_error(self.filename, self.lock, \"a\") as (f, err):\n if err:\n logging.error(\"File '%s'. Error: %s\", self.filename, err)\n else:\n f.write(delimiter.join(data))\n else:\n logging.error(\"Data isn't list or it's not contains elements\")", "def save_to_file(cls, list_objs):\n the_list = []\n if list_objs is not None:\n for stuff in list_objs:\n new_stuff = stuff.to_dictionary()\n the_list.append(new_stuff)\n the_list = Base.to_json_string(the_list)\n with open(\"{}.json\".format(cls.__name__), mode='w') as f:\n f.write(str(the_list))", "def save_csv(filename, save_list):\n with open(filename, mode='w') as csv:\n csv.writelines([','.join(item) + '\\n' for item in save_list])", "def fromlist(self, floats):\n self.header = {}\n params = [p for p in self]\n min_len = min(len(params), len(floats))\n for param, value in zip(params[:min_len], floats[:min_len]):\n param.value = value\n for param in params[min_len:]:\n param.value = param.default_value", "def convert_to_floats(str_lst):\n\n float_order = []\n for string in str_lst:\n float_order.append(float(string))\n\n return float_order", "def csv_save_list(list_data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n for item in list_data:\n if encoding is not None:\n writer.writerow([item.encode(encoding)])\n else:\n writer.writerow([item])", "def save_data(data,file):\n\n f = open(file, mode='w',encoding='utf-8', buffering=1024)\n for t in data:\n f.write(str(t[0]) + ', ' + str(t[1]) + '\\n')\n f.close()", "def write_features_to_file(filename,locs,desc):\n savetxt(filename, hstack((locs, desc)))", "def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", 
(filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)", "def writeQrels(qrelList, fileName):\n with open(fileName, 'w') as f:\n for e in qrelList:\n f.write(qrelEntry2Str(e))\n f.write('\\n')" ]
[ "0.73861384", "0.6875439", "0.6696524", "0.64751726", "0.64447385", "0.64314556", "0.62464565", "0.62336665", "0.62258744", "0.6136174", "0.61130786", "0.60936034", "0.6083807", "0.6069135", "0.5993641", "0.59823793", "0.59138167", "0.5911231", "0.59085566", "0.584276", "0.5834877", "0.5827411", "0.5823831", "0.580929", "0.5808705", "0.57922816", "0.5739846", "0.5727965", "0.5724352", "0.57087827", "0.57049376", "0.5702529", "0.5701397", "0.56966", "0.5675191", "0.5659411", "0.5651064", "0.5643869", "0.5631769", "0.5628793", "0.5618548", "0.5605667", "0.56039727", "0.5596357", "0.5594215", "0.5589557", "0.55736226", "0.5561954", "0.55378073", "0.5529769", "0.5528404", "0.55198514", "0.5519538", "0.54917717", "0.5483224", "0.5467709", "0.5456664", "0.54508734", "0.544424", "0.5437119", "0.54176444", "0.54074", "0.5395247", "0.53944445", "0.5390715", "0.5387203", "0.53853214", "0.5381096", "0.53793794", "0.53726125", "0.5371945", "0.537021", "0.5366072", "0.53542084", "0.53514993", "0.5345699", "0.53440124", "0.5332228", "0.5329433", "0.5322063", "0.5321217", "0.53170186", "0.53160435", "0.5312182", "0.53105885", "0.5310083", "0.52973485", "0.52928036", "0.52913976", "0.52778494", "0.5274437", "0.52699727", "0.5267823", "0.52645105", "0.525886", "0.52455467", "0.52446246", "0.52440387", "0.52422684", "0.5240786" ]
0.6402331
6
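The positive document in the record above is Python 2 code (xrange, types.ListType). As a point of comparison only, here is a minimal sketch of the same behaviour on Python 3; the name write_text_file_py3 is an assumption and the sketch is not part of the dataset.

# Sketch (assumed name, not a dataset entry): write a list of lists of numbers
# to an ASCII file, one row per line, mirroring the fixed-width formatting above.
def write_text_file_py3(data, file_name):
    with open(file_name, "w") as outf:
        if isinstance(data[0], list):
            # data is a list of columns; write one row per index
            for i in range(len(data[0])):
                for col in data:
                    if isinstance(col[i], int):
                        outf.write(" %12d" % col[i])
                    else:
                        outf.write(" %12.5g" % col[i])
                outf.write("\n")
        else:
            # single flat list: one value per line
            for value in data:
                if isinstance(value, int):
                    outf.write(" %12d\n" % value)
                else:
                    outf.write(" %12.5g\n" % value)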
Rotate about a different center
def rotate_about_center(alpha, cx, cy): cmp1 = compose_transform2(0, -cx, -cy, 1, alpha, 0, 0, 1) cmp2 = compose_transform2(cmp1[0], cmp1[1], cmp1[2], cmp1[3], 0, cx, cy, 1) # return compalpha, comptrans.at(0),comptrans.at(1), compscale return cmp2[0], cmp2[1], cmp2[2], cmp2[3]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotation_center(self, *args, **kwargs) -> Any:\n pass", "def rotate(self,center, angle):\n \n self.coord = [x-np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n alpha = angle\n R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])\n \n for i in range(len(self.coord)):\n self.coord[i] = np.squeeze([np.dot([x],R) for x in self.coord[i]])\n\n self.coord = [x+np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n return self", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)", "def rotation_pivot_to_center(self):\n pass", "def rot_center(self):\n loc = self.rect.center\n self.image = pygame.transform.rotate(self.current_sprite_alpha, self.rot)\n self.rect = self.image.get_rect()\n self.rect.center = loc", "def rotate_shape(shape, xy_center, angle_degrees):", "def rotate90(self):", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def rotate(self, center, angle):\n center = self.center.rotate(center, angle)\n angle = self.angle + angle\n return self.copy(center=center, angle=angle)", "def rot_center(image, angle):\r\n orig_rect = image.get_rect()\r\n rot_image = transform.rotate(image, angle)\r\n rot_rect = orig_rect.copy()\r\n rot_rect.center = rot_image.get_rect().center\r\n rot_image = rot_image.subsurface(rot_rect).copy()\r\n return rot_image", "def rot_center(self, image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def rot_center(self, image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def rot_center(self):\n x, y = pygame.mouse.get_pos()\n relx, rely = x - self.rect.x, y - self.rect.y\n angle = math.atan2(relx, rely)\n angle = math.degrees(angle)\n orig_rect = self.image_original.get_rect()\n rot_image = pygame.transform.rotate(self.image_original, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def rotate(self, radians, center):\n degrees = radians * 180 / math.pi\n matrix = opencv.getRotationMatrix2D(center.tuple(), degrees, 1.0)\n\n rotated = opencv.warpAffine(self.img, matrix, (self.width, self.height))\n return Image(rotated)", "def rotate(self, angle):\n n, a = Vector.polar([self.x, self.y])\n a += angle\n self.x = n * cos(a)\n self.y = n * sin(a)", "def rotate(self):\r\n self.rot = (self.vel.y * -3)\r\n if self.rot < -90:\r\n self.rot = -90\r\n \r\n new_image = pg.transform.rotate(self.bird_sprites[self.sprite_frame], self.rot)\r\n old_center = self.rect.center\r\n self.image = new_image\r\n self.rect = self.image.get_rect()\r\n self.rect.center = old_center\r\n # self.animate()\r", "def rot_center(image, angle):\r\n orig_rect = image.get_rect()\r\n rot_image = pygame.transform.rotate(image, angle)\r\n rot_rect = orig_rect.copy()\r\n rot_rect.center = rot_image.get_rect().center\r\n rot_image = rot_image.subsurface(rot_rect).copy()\r\n return 
rot_image", "def rotater(self, direction):\n if self.center:\n mapDel(self, theFallen)\n rotate(self, direction)\n mapAdd(self, theFallen)", "def rotate(self):\r\n # Rotate the image.\r\n self.image = pg.transform.rotozoom(self.orig_image, -self.angle, 1)\r\n # Rotate the offset vector.\r\n offset_rotated = self.offset.rotate(self.angle)\r\n print(\"offset_rotated:\", offset_rotated)\r\n # Create a new rect with the center of the sprite + the offset.\r\n self.rect = self.image.get_rect(center=self.pos+offset_rotated)", "def rotate(self, angle=45, center=(0, 0)):\n if angle == 0:\n return self\n if hasattr(center, \"center\"):\n center = center.center\n self.rotation += angle\n self.origin = _rotate_points(self.origin, angle, center)\n if self.owner is not None:\n self.owner._bb_valid = False\n return self", "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def orbit_rotate(center, obj, d_ang, dist = 0, ang = -20):\n if ang == -20:\n\n dx = obj.rect.centerx - center.rect.centerx\n dy = obj.rect.centery - center.rect.centery\n\n if dx > 0 and dy < 0:\n ang = abs(np.rad2deg(np.arctan(dx/dy)))\n elif dx < 0 and dy < 0:\n ang = abs(np.rad2deg(np.arctan(dy/dx)))\n elif dx > 0 and dy > 0:\n ang = abs(np.rad2deg(np.arctan(dy/dx)))\n elif dx < 0 and dy > 0:\n ang = abs(np.rad2deg(np.arctan(dx/dy)))\n else:\n ang = 90\n else:\n\n obj.orbit_ang += d_ang\n\n if obj.orbit_ang > 360:\n obj.orbit_ang += -360\n elif obj.orbit_ang < 0:\n obj.orbit_ang += 360\n\n ang = obj.orbit_ang\n\n if dist == 0:\n pass\n\n obj.rect.centerx = center.rect.centerx + dist*(np.sin(np.deg2rad(ang)))\n obj.rect.centery = center.rect.centery + dist*(np.cos(np.deg2rad(ang)))", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def rotate(self, angle=45, center=(0, 0)):\n self.position = _rotate_points(self.position, angle=angle, center=center)\n return self", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._normal = matrix.dot(self._normal)\n self._position = matrix.dot(self._position)", "def rotate(self):\n pass", "def rotate1(self, alpha=0.0, cen=None):\n # convert degrees to rads\n alpha = (alpha / 360) * 2 * pi\n # centre of rotation is center of gravity of the figure,\n # unless otherwise specified\n if cen is None:\n cen = self.centre()\n # change origin of coordinates to local ones\n nf = self.translate1(-cen.x, -cen.y)\n for p in nf.point:\n x1 = p.x * cos(alpha) + p.y * sin(alpha)\n y1 = -p.x * sin(alpha) + p.y * cos(alpha)\n p.x,p.y = x1,y1\n #change back coordinates to global\n return nf.translate1(+cen.x, +cen.y)", "def blitRotateCenter(surf, image, topleft, angle):\n new_rect = image.get_rect(center=image.get_rect(topleft=topleft).center)\n\n surf.blit(image, 
new_rect.topleft)", "def rot_center(image,rect,angle):\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image, rot_rect", "def rot_center(image, rect, angle):\n\trot_image = pygame.transform.rotate(image, angle)\n\trot_rect = rot_image.get_rect(center=rect.center)\n\treturn rot_image,rot_rect", "def rot_center(self, image, position, angle):\n rect = image.get_rect().move(*position)\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image, rot_rect", "def rotate(self):\n\n self.x, self.y = self.scene.player.facing\n rotation = degrees(atan2(self.x, self.y))\n self.image = transform.rotate(self.image, rotation)\n self.rect = self.image.get_rect(left=self.rect.left, top=self.rect.top)\n self.rect.centerx = self.scene.player.rect.centerx + (8 * self.x)\n self.rect.centery = self.scene.player.rect.centery + (8 * self.y)", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def obInit(position, angle, center):\n\tif angle > 360.0:\n\t angle = angle - 360\n\tif angle < - 360:\n\t angle = -angle - 360\n\tif angle > -360 and angle < 0:\n\t angle = -angle\n\tadjPosition = position - center\n\tnewposition = adjPosition.rotate(angle) + center\n\treturn newposition", "def rotate(self, camera, x, y, last_x, last_y, center_x, center_y):\n\n rx = -360.0 * (x - last_x) / center_x\n ry = 90.0 * (y - last_y) / center_y\n\n camera_up = camera.GetViewUp()\n camera_proj = camera.GetDirectionOfProjection()\n global_up = self.mp_global_up\n\n axis_right = ut.normalize(np.cross(camera_proj, global_up))\n angle_up = np.arccos(np.dot(camera_up, global_up)) * 180.0 / np.pi\n\n if angle_up + ry >= 90.0:\n ry = 89.0 - angle_up\n\n fp = camera.GetFocalPoint()\n transform = vtk.vtkTransform()\n transform.Identity()\n transform.Translate(fp[0], fp[1], fp[2])\n transform.RotateWXYZ(rx, global_up)\n transform.RotateWXYZ(ry, axis_right)\n transform.Translate(-fp[0], -fp[1], -fp[2])\n camera.ApplyTransform(transform)\n\n camera.OrthogonalizeViewUp()\n\n self.ren_win.Render()", "def rotation_center_x(self, *args, **kwargs) -> Any:\n pass", "def rotation_center_y(self, *args, **kwargs) -> Any:\n pass", "def rotated(self, vert, deg, axis=[0, 1, 0], cam=None, texture=None):\n import cv2\n new_rot = cv2.Rodrigues(np.deg2rad(deg) * np.array(axis))[0]\n new_rot = convert_as(torch.FloatTensor(new_rot), vert)\n\n center = vert.mean(0)\n new_vert = torch.t(torch.matmul(new_rot,\n torch.t(vert - center))) + center\n # new_vert = torch.matmul(vert - center, new_rot) + center\n\n return self.__call__(new_vert, cams=cam, texture=texture)", "def _rotate_about_origin(self, angle, axis):\n print 'Invoked abstract {}._rotate_about_origin({}, {})'.format(\n self, angle, axis)\n return", "def center(self, degree=0, speed=0):\n if degree == 0:\n sign = 1\n else:\n sign = (degree/abs(degree))\n\n spin = sign*((abs(degree)**.5)/10)\n self.set_vals(speed=speed, spin=spin)", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n rM = Matrix([\n [ca, -sa],\n [sa, ca]\n ])\n p0 = self.p0\n self.c = p0 + rM @ (self.c - p0)\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)\n return self", "def rotate(self, angle_radians):\n cos = math.cos(angle_radians)\n sin = math.sin(angle_radians)\n x = self.x*cos - self.y*sin\n y = self.x*sin + self.y*cos\n self.x = x\n self.y = y", "def setPlotRotation(ang, x,y):\n dislin.trfrot(ang,x,y)", "def rotate(self, point, rotation):\n\n self.rotation = self.rotation + rotation\n\n 
# get the point before the rotation\n ptBeforeScale = self.mapToScene(point)\n\n # rotate the view\n QGraphicsView.translate(self, point.x(), point.y())\n QGraphicsView.rotate(self, rotation)\n QGraphicsView.translate(self, -point.x(), -point.y())\n\n # counter rotate the selection point\n t = QTransform()\n t.rotate(-rotation)\n ptAfterScale = t.map(ptBeforeScale)\n\n # calculate the offset and update\n offset = ptBeforeScale - ptAfterScale\n newCenter = self.centerPoint - offset\n self.setCenterPoint(newCenter)", "def rotate(self, rotation):\n self.coords = dot(rotation, self.coords)\n return self", "def corrected_rotation(x_arr, mu):\n if x_arr < (mu-180):\n x_arr += 360\n elif x_arr > mu+180:\n x_arr -= 360\n\n return x_arr", "def rotate(self, rad):\n s, c = [f(rad) for f in (math.sin, math.cos)]\n x, y = (c*self.x - s*self.y, s*self.x + c*self.y)\n return Point(x,y)", "def rotate(self, angle, point=None):\n # Actually not working\n if not point:\n point = self.center\n for i in range(len(self.points)):\n self.points[i].rotate(angle, point)", "def rotate(self, rad):\n s, c = [f(rad) for f in (math.sin, math.cos)]\n x, y = (c * self.x - s * self.y, s * self.x + c * self.y)\n return Point(x, y)", "def _rotate(self, tetrino):\n tetrino.rotate()", "def rotate((x,y)):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n w,h = image_size()\n if orientation == 0: return (x,y)\n if orientation == -90: return (h-y,x)\n if orientation == 90: return (y,w-x)\n if orientation == 180: return (w-x,h-y)\n return (x,y)", "def rotate(self, angle=45, center=(0, 0)):\n super().rotate(angle=angle * pi / 180, center=center)\n if self.parent is not None:\n self.parent._bb_valid = False\n return self", "def _rad_center(self):\n return ((self.rad_hi + self.rad_lo) / 2).to(\"deg\")", "def rotate_arcs(self):\n\n if self.arc_direction:\n self.thick_arc_start_angle -= 5\n self.thick_arc_end_angle -= 5\n\n self.thin_arc_start_angle += 5\n self.thin_arc_end_angle += 5\n else:\n self.thick_arc_start_angle += 5\n self.thick_arc_end_angle += 5\n\n self.thin_arc_start_angle -= 5\n self.thin_arc_end_angle -= 5", "def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self", "def _rotate_origin(x, y, rotation_deg):\n rotation_rad = np.deg2rad(rotation_deg)\n # Rotation is set negative to make counterclockwise rotation\n xx = x * np.cos(-rotation_rad) + y * np.sin(-rotation_rad)\n yy = -x * np.sin(-rotation_rad) + y * np.cos(-rotation_rad)\n return xx, yy", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n self.v = Matrix([\n [ca, -sa],\n [sa, ca]\n ]) @ self.v\n return self", "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "def _rotate_coords(self, x, y, theta, ox, oy):\n s, c = self._pkgs['numpy'].sin(theta), self._pkgs['numpy'].cos(theta)\n x, y = self._pkgs['numpy'].asarray(x) - ox, self._pkgs['numpy'].asarray(y) - oy\n return x * c - y * s + ox, x * s + y * c + oy", "def _spin(self):\n center = self.rect.center\n self.dizzy += 12 # rotate 12 degree clockwise\n\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image 
= self.original # reset the image to its original ones after rotated\n else:\n self.image = pygame.transform.rotate(self.original, self.dizzy)\n\n self.rect = self.image.get_rect()\n self.rect.center = center # make sure the image would not move when spinning", "def _spin(self):\n center= self.rect.center\n self.dizzy= self.dizzy + 10 #12\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original\n else:\n rotate= pygame.transform.rotate\n self.image= rotate(self.original, self.dizzy)\n self.rect= self.image.get_rect(center= center)", "def qrotate(self, angle):\n\n q = int(round(angle / 90.0)) % 4\n a = 90.0 * q\n\n if (q == 0):\n pass\n elif (q == 1):\n self.srotate(a)\n self.center = (self.q_size[1] - self.center[1],\n 0 + self.center[0])\n elif (q == 2):\n self.srotate(a)\n self.center = (self.q_size[0] - self.center[0],\n self.q_size[1] - self.center[1])\n elif (q == 3):\n self.srotate(a)\n self.center = (0 + self.center[1],\n self.q_size[0] - self.center[0])", "def rotate(self, rx: float = 0, ry: float = 0, rz: float = 0,\n center: Onion[Iterable[Onion[float, int]], Point3D] = None) -> 'Component':\n transform = self._local_transform\n\n if center is None:\n center_point = self._origin\n elif isinstance(center, Point3D):\n center_point = center\n elif isinstance(center, Point):\n center_point = center.point\n else:\n center_coordinates = list(center)[0:3]\n while len(center_coordinates) < 3:\n center_coordinates.append(0)\n center_point = Point3D.create(*center_coordinates)\n\n if rx != 0:\n rotation = Matrix3D.create()\n rotation.setToRotation(math.radians(rx), self._pos_x, center_point)\n transform.transformBy(rotation)\n if ry != 0:\n rotation = Matrix3D.create()\n rotation.setToRotation(math.radians(ry), self._pos_y, center_point)\n transform.transformBy(rotation)\n if rz != 0:\n rotation = Matrix3D.create()\n rotation.setToRotation(math.radians(rz), self._pos_z, center_point)\n transform.transformBy(rotation)\n self._reset_cache()\n return self", "def rotation_around_axis(self,axis,angle,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n # get the data\n ct,st = np.cos(angle),np.sin(angle)\n ux,uy,uz = axis\n\n # get the center of the molecule\n xyz0 = np.mean(xyz,0)\n\n # definition of the rotation matrix\n # see https://en.wikipedia.org/wiki/Rotation_matrix\n rot_mat = np.array([\n [ct + ux**2*(1-ct), ux*uy*(1-ct) - uz*st, ux*uz*(1-ct) + uy*st],\n [uy*ux*(1-ct) + uz*st, ct + uy**2*(1-ct), uy*uz*(1-ct) - ux*st],\n [uz*ux*(1-ct) - uy*st, uz*uy*(1-ct) + ux*st, ct + uz**2*(1-ct) ]])\n\n # apply the rotation\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n self.update('x,y,z',xyz,**kwargs)\n\n return xyz0", "def rotatedBy(self, angle):\n\t\tx, y = self.x, self.y\n\t\tc, s = cos(angle), sin(angle)\n\t\treturn Vector((c * x) - (s * y), (s * x) + (c * y))", "def _rotate_points(points, angle = 45, center = (0,0)):\n if angle == 0:\n return points\n angle = angle*np.pi/180\n ca = np.cos(angle)\n sa = np.sin(angle)\n sa = np.array((-sa, sa))\n c0 = np.array(center)\n if np.asarray(points).ndim == 2:\n return (points - c0) * ca + (points - c0)[:,::-1] * sa + c0\n if np.asarray(points).ndim == 1:\n return (points - c0) * ca + (points - c0)[::-1] * sa + c0", "def rotation_matrix(self,rot_mat,center=True,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n if center:\n xyz0 = np.mean(xyz)\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n else:\n xyz = np.dot(rot_mat,(xyz).T).T\n self.update('x,y,z',xyz,**kwargs)", "def centralAngle(self):\n global central_angle\n central_angle = 
always_redraw(\n lambda : Angle(radius_horiz, radius_ang, radius=0.25, stroke_color=YELLOW)\n )\n\n global central_angle_label\n central_angle_label = always_redraw(\n lambda : MathTex(\"x\", stroke_color=GREEN).scale(0.75).move_to(\n LEFT*5+UP*(0.3*self.x_max*np.sin(0.5*theta.get_value()*DEGREES))+RIGHT*(0.3*self.x_max*np.cos(0.5*theta.get_value()*DEGREES)))\n )\n\n self.play(Write(central_angle), Write(central_angle_label))", "def rotate_points(x,y,theta,center=[0,0],units='radians'):\r\n # Convert theta to radians\r\n if units == 'degrees':\r\n theta = theta/180.0*np.pi\r\n elif units == 'radians':\r\n pass\r\n else:\r\n print 'Invalid input parameter for angle units! Assuming radians'\r\n \r\n # Compute rotation matrix constants\r\n s = np.sin(theta)\r\n c = np.cos(theta)\r\n\r\n # translate points so that the center is at the to origin:\r\n x -= center[0]\r\n y -= center[1]\r\n\r\n # rotate points by multiplying by rotation matrix\r\n xnew = x*c - y*s\r\n ynew = x*s + y*c\r\n\r\n # translate points back to original location\r\n xnew += center[0]\r\n ynew += center[1]\r\n \r\n return xnew,ynew", "def _rotate_points(points, angle=45, center=(0, 0)):\n if angle == 0:\n return points\n angle = angle * pi / 180\n ca = cos(angle)\n sa = sin(angle)\n sa = np.array((-sa, sa))\n c0 = np.array(center)\n if np.asarray(points).ndim == 2:\n return (points - c0) * ca + (points - c0)[:, ::-1] * sa + c0\n if np.asarray(points).ndim == 1:\n return (points - c0) * ca + (points - c0)[::-1] * sa + c0", "def rotate(contr):\n # get the object this script is attached to\n camera = contr.owner\n\n # Get sensor named Mouse\n mouse = contr.sensors['Mouse']\n\n if mouse.positive:\n # get width and height of game window\n width = Rasterizer.getWindowWidth()\n height = Rasterizer.getWindowHeight()\n\n # get mouse movement from function\n move = mouse_move(camera, mouse, width, height)\n\n # set mouse sensitivity\n sensitivity = camera['Sensitivity']\n\n # Amount, direction and sensitivity\n leftRight = move[0] * sensitivity\n upDown = move[1] * sensitivity\n\n # set the values\n camera.applyRotation( [0.0, 0.0, leftRight], 0 )\n camera.applyRotation( [upDown, 0.0, 0.0], 1 )\n\n # Center mouse in game window\n # Using the '//' operator (floor division) to produce an integer result\n Rasterizer.setMousePosition(width//2, height//2)", "def rotate(self,angle):\n radians = (angle * math.pi)/180\n self.direction += angle\n for object in self.objects:\n y = object.position[0]\n x = object.position[1]\n\n object.position[0] = x * math.sin(radians) + y * math.cos(radians)\n object.position[1] = x * math.cos(radians) - y * math.sin(radians)", "def rotate_orbit(self):\n try:\n ang = self.orbit_speed * self.time_scale / self.refresh_rate\n self.obj.rotate(angle=ang, axis=vector(0, 1, 0), origin=self.star.obj.pos)\n self.sum_ang += ang\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "def Rotate(X,Y,sintheta=0,costheta=1):\n X1 = X * costheta - Y * sintheta \n Y1 = X * sintheta + Y * costheta \n return X1,Y1", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = 
np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "def RotateObject(object_id, center_point, rotation_angle, axis=None, copy=False):\n rc = RotateObjects(object_id, center_point, rotation_angle, axis, copy)\n if rc: return rc[0]\n return scriptcontext.errorhandler()", "def make_rotation(self, rotation):\n if rotation == \"r\":\n self.facing += 1\n else:\n self.facing -= 1\n\n if self.facing > 3:\n self.facing = self.facing - 4\n elif self.facing < 0:\n self.facing = self.facing + 4", "def _rotate_around_point(point, angle, center):\n x = point.x - center.x\n y = point.y - center.y\n cos = math.cos(angle)\n sin = math.sin(angle)\n x_ = x * cos - y * sin\n y_ = x * sin + y * cos\n return Point(x_, y_) + center", "def img_rotate(img, angle, center, fillval=0):\n rows, cols = img.shape[:2]\n M = cv2.getRotationMatrix2D(center, angle, 1)\n return cv2.warpAffine(img, M, (cols, rows), borderValue=fillval)", "def rotator(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c,-s],[s,c]])", "def steerright(self):\n self.direction = self.direction-self.steering\n if self.direction < 0:\n self.direction = 360-90\n self.image, self.rect = rot_center(self.image_orig,self.rect,self.direction)", "def rotation(self, *args, **kwargs) -> Any:\n pass", "def rotate(self, *args, **kwargs): # real signature unknown\n pass", "def rotate(img, deg, center=None):\n\tgray = grayscale(img)\n\trows, cols = gray.shape\n\tif center is None:\n\t\tM = cv2.getRotationMatrix2D((cols/2, rows/2), deg, 1)\n\telse:\n\t\tM = cv2.getRotationMatrix2D(center, deg, 1)\n\tdst = cv2.warpAffine(gray, M, (cols, rows))\n\treturn dst", "def rotaxis(proj, N_steps):\n a = proj.shape[1]//4\n b = 3 * proj.shape[1]//4\n c = proj.shape[2]//4\n d = 3 * proj.shape[2]//4\n \n \n cent = []\n N_rot = proj.shape[0] - 180 * N_steps\n \n for i in range(N_rot):\n distances = shift(proj[i, a:b, c:d], np.flip(proj[i + N_steps*180, a:b, c:d] ,1))\n cent.append(proj[i].shape[1]/2 + distances[1]/2)\n \n return cent", "def rotate(u, w, th):\n ur = np.cos(th) * u + np.sin(th) * w\n wr = -np.sin(th) * u + np.cos(th) * w\n return ur, wr", "def rotate(X):\n return X", "def rotate(self, coord):\r\n [x, y] = coord\r\n xrot = x*math.cos(self.heading) - y*math.sin(self.heading)\r\n yrot = x*math.sin(self.heading) + y*math.cos(self.heading)\r\n return [xrot, yrot]", "def _rotate_coordinate(self, x, y, angle):\n\n sin = math.sin(angle)\n cos = math.cos(angle)\n\n x_ = x * cos - y * sin\n y_ = x * sin + y * cos\n\n return (x_, y_)", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def rotate(self, vect, 
angle):\n self.pl.Rotation = Rotation(vect, angle)\n\n self.comp.Placement = self.pl\n self.box.Placement = self.pl", "def rotate(self, rotation_matrix, centre=None):\n locations = self.locations.rotate(rotation_matrix, centre)\n if self.orientations is not None:\n orientations = self.orientations.rotate(rotation_matrix, None)\n else:\n orientations = None\n pcs = self.pcs.rotate(rotation_matrix, centre)\n self.locations = locations\n self.orientations = orientations\n self.pcs = pcs\n return self", "def rotate(self):\n tmp = self.width\n self.width = self.height\n self.height = tmp\n self.rotated = not self.rotated", "def rotate((x, y), theta):\n\n return math.cos(theta) * x + math.sin(theta) * y, -math.sin(theta) * x + math.cos(theta) * y", "def rotatedView(img, angle, enlarge=True, extend=Views.extendBorder):\n cx = img.dimension(0) / 2.0\n cy = img.dimension(1) / 2.0\n toCenter = AffineTransform2D()\n toCenter.translate(-cx, -cy)\n rotation = AffineTransform2D()\n # Step 1: place origin of rotation at the center of the image\n rotation.preConcatenate(toCenter)\n # Step 2: rotate around the Z axis\n rotation.rotate(radians(angle))\n # Step 3: undo translation to the center\n rotation.preConcatenate(toCenter.inverse())\n rotated = RV.transform(Views.interpolate(extend(img),\n NLinearInterpolatorFactory()), rotation)\n if enlarge:\n # Bounds:\n bounds = repeat((sys.maxint, 0)) # initial upper- and lower-bound values \n # for min, max to compare against \n transformed = zeros(2, 'f')\n for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):\n rotation.apply(corner, transformed)\n bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))\n for (vmin, vmax), v in zip(bounds, transformed)]\n minC, maxC = map(list, zip(*bounds)) # transpose list of 2 pairs\n # into 2 lists of 2 values\n imgRot = Views.zeroMin(Views.interval(rotated, minC, maxC))\n else:\n imgRot = Views.interval(rotated, img)\n return imgRot", "def rot(wx, wy, order, dist):\n for _ in range(dist//90):\n if order == \"R\":\n wx, wy = wy, -wx\n elif order == \"L\":\n wx, wy = -wy, wx\n return wx, wy" ]
[ "0.75949824", "0.74933934", "0.7462114", "0.74131936", "0.738581", "0.7327082", "0.7053461", "0.70344794", "0.69995296", "0.69260097", "0.69119567", "0.69119567", "0.68725055", "0.676936", "0.67615163", "0.6755245", "0.6707772", "0.6677421", "0.6677066", "0.667549", "0.66710836", "0.66710836", "0.66710836", "0.66426957", "0.6642177", "0.6630249", "0.6614379", "0.6593436", "0.6585946", "0.6578317", "0.6567942", "0.65398717", "0.6538059", "0.6536131", "0.6512959", "0.6490096", "0.648941", "0.6475743", "0.6474244", "0.6444226", "0.64306116", "0.64111507", "0.6405365", "0.6380405", "0.6378815", "0.6369065", "0.63686556", "0.6365162", "0.63439184", "0.63351786", "0.6335058", "0.63100326", "0.631001", "0.6302167", "0.6284896", "0.62805974", "0.62734604", "0.62716186", "0.6250824", "0.6226161", "0.62118536", "0.6192984", "0.61881787", "0.6187437", "0.616411", "0.61635953", "0.61400735", "0.6135333", "0.61326355", "0.6113488", "0.61123496", "0.61051255", "0.610204", "0.609939", "0.6095908", "0.6092345", "0.609018", "0.6087238", "0.60858274", "0.60821176", "0.6081199", "0.60798514", "0.6069803", "0.60585666", "0.60580945", "0.6039308", "0.6038891", "0.6032069", "0.6031962", "0.602628", "0.6017601", "0.6008364", "0.60066456", "0.60049623", "0.5997744", "0.59959495", "0.5993768", "0.59901035", "0.5989353", "0.5987223" ]
0.6676087
19
linearly interpolate a 1D power spectrum to required length with required Pixel size
	input_object - a 1D list with a 1D curve to be interpolated
	length_current - half size of the image size (in case of power spectrum, it can be different from the length of the input_object)
	length_interpolated - length of the interpolated 1D curve
	Pixel_size_current - pixel size of the input 1D list
	Pixel_size_interpolated - pixel size of the target 1D list
	One can either input the two lengths or two respective pixel sizes
def reshape_1d(input_object, length_current=0, length_interpolated=0, Pixel_size_current = 0., Pixel_size_interpolated = 0.):
    interpolated = []
    if length_current == 0: length_current = len(input_object)
    lt = len(input_object) - 2
    if length_interpolated == 0:
        if( Pixel_size_interpolated != Pixel_size_current):
            length_interpolated = int(length_current*Pixel_size_current/Pixel_size_interpolated + 0.5)
        else:
            ERROR("Incorrect input parameters","reshape_1d",1)
            return []
    if Pixel_size_current == 0.:
        Pixel_size_current = 1.
        Pixel_size_interpolated = Pixel_size_current*float(length_current)/float(length_interpolated)
    qt = Pixel_size_interpolated/Pixel_size_current
    for i in xrange(length_interpolated):
        xi = float(i)*qt
        ix = min(int(xi), lt)
        df = xi - ix
        xval = (1.0-df)*input_object[ix] + df*input_object[ix+1]
        interpolated.append(xval)
    return interpolated
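A minimal usage sketch of the function above. Each output sample i is read at xi = i * (Pixel_size_interpolated / Pixel_size_current) and linearly blended from the two neighbouring input samples, so the target grid can be given either as a length or as a pair of pixel sizes. The curve values and pixel sizes below are invented for illustration only; reshape_1d is the function defined in this record.

# Hypothetical 1D rotational power spectrum sampled at pixel size 1.0 A (made-up values).
ps_1d = [1.0, 0.8, 0.6, 0.5, 0.45, 0.42, 0.40, 0.39]

# Resample onto a grid with pixel size 1.25 A; the output length is derived
# from the pixel-size ratio: int(8 * 1.0 / 1.25 + 0.5) = 6 samples.
ps_coarse = reshape_1d(ps_1d, Pixel_size_current=1.0, Pixel_size_interpolated=1.25)

# Alternatively, request a target length directly and let the pixel sizes default.
ps_fine = reshape_1d(ps_1d, length_interpolated=16)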
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolate(signal, new_length):\n assert len(signal) > 1 and len(signal[0]) > 1\n current_length = len(signal)\n signal = np.array(signal).T\n new_signal = []\n x_array = get_x_array(current_length, new_length)\n\n for l in range(len(signal)):\n fp = signal[l]\n xp = list(range(current_length))\n new_f = np.interp(x_array, xp, fp)\n new_signal.append(new_f)\n\n signal = np.array(new_signal).T\n return signal", "def _linear_interpolation(\n prevFrame : \"np.ndarray\",\n cFrame : \"np.ndarray\",\n fID : \"int\",\n smoothingFrames : \"int\"\n ) -> \"np.ndarray\":\n prevWeight = 1-((fID+1)/smoothingFrames)\n finalWeight = (fID+1)/smoothingFrames\n transitionFrame = prevWeight * prevFrame + finalWeight*cFrame\n return transitionFrame.astype(np.uint8)", "def interpolatePeriodicSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [3 * pow(x[0],2), 2 * x[0], 1, 0]\n A[i*4+2, i*4:(i+1)*4] = [-3 * pow(x2,2), -2 * x2, -1, 0]\n A[i*4+3, 0:4] = [6 * x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [-6 * x2, -2, 0, 0]\n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def compute_interpolation_weights(out_size, in_size, scale):\n # lower, upper, lerp\n res = [[0, 0, 0] for _ in range(out_size + 1)]\n for i in range(out_size - 1, -1, -1):\n val = i * scale\n res[i][0] = int(val)\n res[i][1] = min(res[i][0] + 1, in_size - 1)\n res[i][2] = val - int(val)\n return res", "def interpolate(arrayin,shape=(256, 256)):\r\n if arrayin.dtype == 'complex' :\r\n Ln = interpolate(np.real(arrayin),shape) + 1.0J * interpolate(np.imag(arrayin),shape)\r\n #Ln = interpolate(np.abs(arrayin),new_res) * np.exp(1.0J * interpolate(np.angle(arrayin),new_res))\r\n else :\r\n coeffs = ndimage.spline_filter(arrayin)\r\n rows,cols = arrayin.shape\r\n coords = np.mgrid[0:rows-1:1j*shape[0],0:cols-1:1j*shape[1]]\r\n Ln = sp.ndimage.map_coordinates(coeffs, coords, prefilter=False)\r\n return Ln", "def interpolateSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [6*x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [6*x2, 2, 0, 0]\n \n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PowerSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] = self.spectrum[:, 1] * 
(self.spectrum[:, 0] * 1e-9 / (constants.c * constants.h))\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def interpolate_subspec(wls, fls, prev_index, gap_ind, wl_step):\n # Get the subspectrum\n sub_spec_wls = wls[prev_index:gap_ind+1]\n sub_spec_fls = fls[prev_index:gap_ind+1]\n # Interpolate onto the new grid, using linear interpolation.\n interp_f = interp1d(sub_spec_wls, sub_spec_fls, kind=\"linear\")\n\n # Calculate the number of linear wavelength steps needed.\n min_wl = min(sub_spec_wls)\n max_wl = max(sub_spec_wls)\n n_steps = math.ceil((max_wl - min_wl) / wl_step)\n # Try a couple step sizes to get as close to the ideal size as possible.\n new_wls1, step_size1 = numpy.linspace(min_wl, max_wl, n_steps,\n retstep=True)\n new_wls2, step_size2 = numpy.linspace(min_wl, max_wl, n_steps+1,\n retstep=True)\n new_wls3, step_size3 = numpy.linspace(min_wl, max_wl, n_steps-1,\n retstep=True)\n # Choose the linear step size closest to our desired step size.\n diffs = [abs(x-wl_step) for x in [step_size1, step_size2, step_size3]]\n if diffs[0] <= diffs[1] and diffs[0] <= diffs[2]:\n new_wls = new_wls1\n elif diffs[1] <= diffs[2] and diffs[1] <= diffs[0]:\n new_wls = new_wls2\n else:\n new_wls = new_wls3\n # Calculate the interpolated values and extend the spectrum with them.\n return (list(new_wls), list(interp_f(new_wls)))", "def smoothed(sequence, step=1, start=0):\n next_index = start + 1\n last = len(sequence) \n new_sequence = []\n if not step:\n return sequence\n ratio_step = step + 1\n for item in sequence:\n new_sequence.append(item)\n if next_index < last:\n next_item = sequence[next_index]\n ratio = (item + next_item) / (step + 1)\n ratio = int(ratio)\n for x in range(step):\n value = (ratio * x) + item\n new_sequence.append(int(value))\n next_index = next_index + 1\n return new_sequence", "def uniformize(self):\n\n self.len = len(self.x)\n\n if self.len > 1:\n # comput length of the shape:\n shape_length, scale = self.euclidian_length()\n\n # find new points:\n new_shape = Stroke()\n new_shape.x = []\n new_shape.y = []\n step = shape_length / float(self.len)\n biggest_smoller_point = 0\n new_shape.append(self.x[0], self.y[0])\n for i in 1 + np.array(range(len(self.x) - 1)):\n try:\n while i * step > scale[biggest_smoller_point]:\n biggest_smoller_point += 1\n\n biggest_smoller_point -= 1\n x0 = self.x[biggest_smoller_point]\n y0 = self.y[biggest_smoller_point]\n x1 = self.x[biggest_smoller_point + 1]\n y1 = self.y[biggest_smoller_point + 1]\n diff = float(i * step - scale[biggest_smoller_point])\n dist = float(scale[biggest_smoller_point + 1] - scale[biggest_smoller_point])\n new_x = x0 + diff * (x1 - x0) / dist\n new_y = y0 + diff * (y1 - y0) / dist\n new_shape.append(new_x, new_y)\n\n except IndexError:\n print i * step\n print biggest_smoller_point\n print scale\n # new_shape.append(self.x[-1], self.y[-1])\n\n\n self.x = new_shape.x\n self.y = new_shape.y\n self.len = new_shape.len", "def _linearize(wcsim, wcsima, wcsref, imcrpix, f, shift, hx=1.0, hy=1.0):\n x0 = imcrpix[0]\n y0 = imcrpix[1]\n p = np.asarray([[x0, y0],\n [x0 - hx, y0],\n [x0 - hx * 0.5, y0],\n [x0 + hx * 0.5, y0],\n [x0 + hx, y0],\n [x0, y0 - hy],\n [x0, y0 - hy * 0.5],\n [x0, y0 + hy * 0.5],\n [x0, y0 + hy]],\n dtype=np.float64)\n # convert image coordinates to reference image coordinates:\n p = wcsref.wcs_world2pix(\n wcsim.wcs_pix2world(p, 1), 1\n ).astype(np.longdouble)\n # apply linear fit transformation:\n p = np.dot(f, (p - shift).T).T\n # convert back to image 
coordinate system:\n p = wcsima.wcs_world2pix(\n wcsref.wcs_pix2world(p.astype(np.float64), 1), 1\n ).astype(np.longdouble)\n\n # derivative with regard to x:\n u1 = ((p[1] - p[4]) + 8 * (p[3] - p[2])) / (6 * hx)\n # derivative with regard to y:\n u2 = ((p[5] - p[8]) + 8 * (p[7] - p[6])) / (6 * hy)\n\n return (np.asarray([u1, u2]).T, p[0])", "def gd(a, step_size=0.1, steps=42):\n out = []\n ### YOUR CODE HERE\n out.append(np.array([256,1]))\n for i in range(steps):\n point = out[i]\n gradient = np.array([0.5*2*a[i],0.5*2*a[i+1]])\n npoint = point - step_size*gradient\n out.append(npoint)\n ### END CODE\n return out", "def __hinterpolate(self):\n \n # Temp. Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n 
t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint", "def cv_compute_interpolation_weights(out_size, in_size, scale):\n # lower, upper, lerp\n res = [[0, 0, 0] for _ in range(out_size + 1)]\n res[-1] = [0, 0]\n for i in range(out_size - 1, -1, -1):\n val = (i + 0.5) * scale - 0.5\n res[i][0] = max(0, int(val))\n res[i][1] = min(res[i][0] + 1, in_size - 1)\n res[i][2] = max(0, val - int(val))\n return res", "def poly_reduce(multiplier, input_size, compressed_values):\n # Square the multiplier and fully reduce it. This does not affect\n # the result modulo 2**61 - 1, but does differ from a\n # direct evaluation modulo 2**64 - 8.\n mulsq = (multiplier ** 2) % (2 ** 61 - 1)\n acc = [0]\n\n def update(y0, y1):\n \"\"\"Double-pumped Horner update (mostly) modulo 8 * (2**61 - 1).\"\"\"\n # Perform a pair of Horner updates in (mod 2**61 - 1).\n reference = multiplier * (acc[0] + y0)\n reference = multiplier * (reference + y1)\n reference %= 2 ** 61 - 1\n\n # The real update is in (mod 2**64 - 8), with a multiplier^2\n # reduced to (mod 2**61 - 1).\n acc[0] = (mulsq * (acc[0] + y0) + multiplier * y1) % (W - 8)\n # Both values should be the same (mod 2**61 - 1).\n assert acc[0] % (2 ** 61 - 1) == reference\n\n for value in compressed_values:\n lo = value % W\n hi = value // W\n update(lo, hi)\n return acc[0]", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def interpolate(self, current_frame_number):\n # check if a detection was added in this frame -> makes no sense otherwise\n if self.get_last_frame() != current_frame_number:\n return\n\n if len(self.detection_list) < 2:\n return\n \n start_frame_number = self.detection_list[-2].frame_number\n ds = self.detection_list[-2]\n end_frame_number = self.detection_list[-1].frame_number\n de = self.detection_list[-1]\n # check if frames are missing -> if none missing break\n if start_frame_number + 1 == end_frame_number:\n return\n\n # interpolate over consecutive frames (linear)\n num_interpolate = end_frame_number - start_frame_number - 1\n\n #step size\n xs_s = (de.x1 - ds.x1) / (num_interpolate + 1)\n xe_s = (de.x2 - ds.x2) / (num_interpolate + 1) \n ys_s = (de.y1 - ds.y1) / (num_interpolate + 1)\n ye_s = (de.y2 - ds.y2) / (num_interpolate + 1) \n\n for i in range(num_interpolate):\n xi1 = int(ds.x1 + xs_s * (i + 1))\n xi2 = int(ds.x2 + xe_s * (i + 1))\n yi1 = int(ds.y1 + ys_s * (i + 1))\n yi2 = int(ds.y2 + ye_s * (i + 1))\n di = det.Detection(ds.label, xi1, yi1, xi2 ,yi2, start_frame_number + i + 1, interpolated = True)\n self.detection_list.insert(len(self.detection_list) - 1, di)", "def interpolate_to_frequency(a, freq_llimit, freq_ulimit):\n 
a_min = a.min()\n a_max = a.max()\n return np.interp(a, (a_min, a_max), (freq_llimit, freq_ulimit))", "def create_spectral_bandpass_interpol(interpol_wavelen, interpol_rad, center_wvl,\n save_dir):\n\n save_dir = os.path.join(save_dir, r'look_up_table')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n center_wvl1 = np.arange(min(center_wvl), max(center_wvl), 2)\n\n\n\n\n for j in np.arange(0, interpol_wavelen.shape[1]):\n #print(j)\n dframe = pd.DataFrame()\n wavelen = interpol_wavelen[:, j]\n\n radiance = interpol_rad[:, j]\n sampled_wvl = np.arange(min(wavelen), max(wavelen), 0.01)\n fit_params = interp1d(wavelen, radiance, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n #peak_val = np.where(fitted_val==max(fitted_val))[0]\n #print(peak_val)\n #peak_shift = sampled_wvl[peak_val] - CW1[j]\n\n\n# if peak_shift >0:\n# sampled_wvl = sampled_wvl - peak_shift\n# elif peak_shift <0:\n# sampled_wvl = sampled_wvl + peak_shift\n# else:\n# sampled_wvl = sampled_wvl\n#\n# print(sampled_wvl[peak_val] - CW1[j])\n\n dframe['Wavelength'] = sampled_wvl\n dframe['Radiance'] = fitted_val\n dframe.round(4).to_csv(save_dir + '/' + 'bandpass_' + \\\n str(round(center_wvl1[j], 2))+'_nm.csv')\n plt.plot(sampled_wvl, fitted_val/np.max(fitted_val), 'g.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(center_wvl1[j], 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(min(wavelen), max(wavelen))\n #plt.show()\n\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(center_wvl1[j], 2))+'_nm.png',\n dpi=100)\n plt.close('all')", "def interpolate_spectrum(interp,wave_ini,flux_ini,wave_fnl,flux_fnl):\n wave_ini_p = wave_ini.ctypes.data_as(ct.POINTER(ct.c_double))\n flux_ini_p = flux_ini.ctypes.data_as(ct.POINTER(ct.c_double))\n wave_fnl_p = wave_fnl.ctypes.data_as(ct.POINTER(ct.c_double))\n flux_fnl_p = flux_fnl.ctypes.data_as(ct.POINTER(ct.c_double))\n\n mask = np.zeros_like(wave_fnl).astype('int32')\n mask_p = mask.ctypes.data_as(ct.POINTER(ct.c_int))\n\n interp(wave_ini.shape[0],wave_fnl.shape[0],\n wave_ini_p,flux_ini_p,\n wave_fnl_p,flux_fnl_p,mask_p)\n\n return mask", "def fit_wavelength(identlist, npixel, xorder, yorder, maxiter, clipping,\n fit_filter=None):\n # find physical order\n k, offset = find_order(identlist, npixel)\n\n # parse the fit_filter=None\n if fit_filter is None:\n fit_filter = lambda item: True\n\n # convert indent_line_lst into fitting inputs\n fit_p_lst = [] # normalized pixel\n fit_o_lst = [] # diffraction order\n fit_w_lst = [] # order*wavelength\n fit_m_lst = [] # initial mask\n # the following list is used to find the position (aperture, no)\n # of each line\n lineid_lst = []\n for aperture, list1 in sorted(identlist.items()):\n order = k*aperture + offset\n #norm_order = 50./order\n #norm_order = order/50.\n list1['order'][:] = order\n for iline, item in enumerate(list1):\n norm_pixel = item['pixel']*2/(npixel-1) - 1\n fit_p_lst.append(norm_pixel)\n fit_o_lst.append(order)\n #fit_o_lst.append(norm_order)\n #fit_w_lst.append(item['wavelength'])\n fit_w_lst.append(item['wavelength']*order)\n fit_m_lst.append(fit_filter(item))\n lineid_lst.append((aperture, iline))\n fit_p_lst = np.array(fit_p_lst)\n fit_o_lst = np.array(fit_o_lst)\n fit_w_lst = np.array(fit_w_lst)\n fit_m_lst = np.array(fit_m_lst)\n\n mask = fit_m_lst\n\n for nite in range(maxiter):\n coeff = 
polyfit2d(fit_p_lst[mask], fit_o_lst[mask], fit_w_lst[mask],\n xorder=xorder, yorder=yorder)\n res_lst = fit_w_lst - polyval2d(fit_p_lst, fit_o_lst, coeff)\n res_lst = res_lst/fit_o_lst\n\n mean = res_lst[mask].mean(dtype=np.float64)\n std = res_lst[mask].std(dtype=np.float64)\n m1 = res_lst > mean - clipping*std\n m2 = res_lst < mean + clipping*std\n new_mask = m1*m2*mask\n if new_mask.sum() == mask.sum():\n break\n else:\n mask = new_mask\n\n # convert mask back to ident_line_lst\n for lineid, ma, res in zip(lineid_lst, mask, res_lst):\n aperture, iline = lineid\n identlist[aperture][iline]['mask'] = ma\n identlist[aperture][iline]['residual'] = res\n\n # number of lines and used lines\n nuse = mask.sum()\n ntot = fit_w_lst.size\n return coeff, std, k, offset, nuse, ntot", "def linear_interpolator(moving):\n \n if isinstance(moving, medipy.base.Image) :\n MovingImageType = medipy.itk.itk_image_type(moving)\n else :\n MovingImageType = moving\n \n return itk.LinearInterpolateImageFunction[MovingImageType, itk.D].New()", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PhotocurrentSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] *= constants.e\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def _interpolation(self, video):\n self.F_int = []\n self.mgrid_0 = []\n self.mgrid_1 = []\n for p in range(video.points.shape[0]):\n _m_0, _m_1 = np.meshgrid(self.extended_points_0[p], self.extended_points_1[p])\n _F_int = interp2d(self.extended_points_0[p], self.extended_points_1[p], video.mraw[0, _m_0, _m_1], kind='cubic')\n self.F_int.append(_F_int)\n\n m_0, m_1 = np.meshgrid(self.extended_points_0[p, self.pad:-self.pad], self.extended_points_1[p, self.pad:-self.pad])\n self.mgrid_0.append(m_0)\n self.mgrid_1.append(m_1)", "def spline_interp(h,yy,yy_diff2,x) :\n assert type(yy)==numpy.ndarray\n #print(__name__, type(h))\n assert type(h)!=numpy.ndarray\n \n n=yy.shape[0]\n nlo=max(int(x/h),0)\n if nlo>n-1: return(0.0)\n nhi=min(nlo+1,n-1)\n a=nhi-x/h # This is checked... 
different to Fortran version due to 0-based arrays\n b=1.0-a\n y=a*yy[nlo]+b*yy[nhi]+((a**3-a)*yy_diff2[nlo]+(b**3-b)*yy_diff2[nhi])*(h**2)/6.0\n return y", "def __init__(self, model, line, segments = None, influence = None, \r\n strength = 1, variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n self.line_raw = copy.copy(line)\r\n \r\n if segments is None:\r\n \r\n self.segments = line.shape[0]-1\r\n \r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n \r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # --------------------------------------------------------------------- \r\n \r\n \r\n \r\n \r\n self.strength = np.ones(self.segments)*strength\r\n \r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n \r\n self.Zi = []\r\n self.offset_outside = []\r\n self.L = []\r\n self.zc = []\r\n self.segment_nvec = []\r\n self.head_target = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n influence_pt = (self.line_c[seg+1]-self.line_c[seg])*self.influence/self.L[seg] + self.line_c[seg]\r\n Z = (2*influence_pt-(self.line_c[seg]+self.line_c[seg+1]))/(self.line_c[seg+1]-self.line_c[seg])\r\n self.Zi += [copy.copy(Z)]\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n part1 = np.nan_to_num((Z+1)*np.log(Z+1))\r\n part2 = np.nan_to_num((Z-1)*np.log(Z-1))\r\n self.offset_outside += [self.L[seg] / (4*np.pi) * (part1 - part2)]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def linear_interpolate(src_code, dst_code, step=5):\n assert (len(src_code.shape) == 2 and len(dst_code.shape) == 2 and\n src_code.shape[0] == 1 and dst_code.shape[0] == 1 and\n src_code.shape[1] == dst_code.shape[1])\n\n linspace = np.linspace(0.0, 1.0, step)[:, np.newaxis].astype(np.float32)\n return src_code + linspace * (dst_code - src_code)", "def datasetratiocopy_xl_extend(l,ratio,x_offset,y_offset):#只延伸上下两边以及左边的点\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01:\r\n if pos_x<0: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset\r\n else:\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def datasetratiocopy_extend(l,ratio,x_offset,y_offset):#全部四边上的点都延伸\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def interpolate_to_parent(self, start, end, linspace_count):\n \n v = end - start\n length = norm(v)\n v = v / length # Make v a unit vector\n l = np.linspace(0, length, linspace_count) \n\n return np.array([start[i] + v[i] * l for i in range(3)])", "def linear_interpolation(atoms1, atoms2, N):\n \n pos1 = atoms1.positions\n pos2 = atoms2.positions\n images = [atoms1]\n for n in range(N):\n nn = n + 1\n atoms_tmp = atoms1.copy()\n atoms_tmp.positions = ( (N+1-nn)*pos1 + nn*pos2 ) / (N+1)\n images += [atoms_tmp]\n images += [atoms2]\n return images", "def fillInPoints(lsOriginal, intIterations = 1):\r\n assert len(lsOriginal) > 1\r\n \r\n # Loop through number of intermediate fillings.\r\n for j in range(0,intIterations):\r\n # New (filled) list.\r\n lsFilled = []\r\n for i in range(0, len(lsOriginal) - 1):\r\n lsFilled.append(lsOriginal[i])\r\n fltIntermediate = (lsOriginal[i] +\r\n (lsOriginal[i + 1] - lsOriginal[i]) / 
2.0)\r\n lsFilled.append(fltIntermediate)\r\n # Edge case to add last element of original list.\r\n if i == (len(lsOriginal) - 2):\r\n lsFilled.append(lsOriginal[i + 1])\r\n lsOriginal = lsFilled\r\n \r\n # Return the filled list.\r\n return lsFilled", "def bil_interpolate(p, ls, dx, dy):\n m = p.shape[0]\n max_i = ls.shape[0]\n max_j = ls.shape[1]\n\n z = np.zeros(p.shape[0])\n code = \\\n \"\"\"\n #line 38 \"distmesh2d.py\"\n int i=0, j=0;\n double x=0.0, y=0.0;\n\n // Loop through all coordinates\n for(int k=0; k<m; k++) {\n // Get grid indices\n i = int( P2(k,0)/dx-0.5 );\n j = int( P2(k,1)/dy-0.5 );\n\n // Probably not helpful.\n // Just take care that (x,y) lie inside the grid ...\n if(i<0 || i>=max_i || j<0 || j>=max_j) {\n Z1(k) = 100000.0;\n continue;\n }\n\n // Handle corner case when coordinates lie on border\n if(i==max_i-1) i-=1;\n if(j==max_j-1) j-=1;\n\n // Local coordinates scaled to [0,1]\n // Maybe this would rather be fmod()?\n x = (P2(k,0)-i*dx)/dx;\n y = (P2(k,1)-j*dy)/dy;\n\n // Interpolate ls values\n double tmp;\n Z1(k) = LS2(i,j)*(1.0-x)*(1.0-y)\n +LS2(i+1,j)*x*(1.0-y)\n +LS2(i,j+1)*(1-x)*y\n +LS2(i+1,j+1)*x*y;\n }\n \"\"\"\n weave.inline(code, \n ['p', 'ls', 'dx', 'dy', 'z', 'm', 'max_i', 'max_j']) \n return z", "def interpolate(x_list, y_list, z_list):\n x1 = x_list[-2]\n x2 = x_list[-1]\n y1 = y_list[-2]\n y2 = y_list[-1]\n z1 = z_list[-2]\n z2 = z_list[-1]\n r = -y1/y2\n x_land = (x1+r*x2)/(r+1)\n z_land = (z1+r*z2)/(r+1)\n x_list[-1] = x_land\n y_list[-1] = 0.0\n z_list[-1] = z_land", "def interpolate(x1, x2, u, N):\n \n # finding the magnitude of each component\n a1 = np.matmul(x1, u)\n a2 = np.matmul(x2, u)\n\n ims = [np.matmul(u, t * a1 + (1 - t) * a2) \\\n for t in np.linspace(0, 1, N)]\n\n return np.stack(ims, 0)", "def interpolate(self, N):\n self.upsampling_rate *= N\n self.data = np.array([interpolate_spline(x,N) for x in self.data])", "def lin_interpol(x_p, y_p):\r\n f = np.zeros([ x_p.shape[0] - 1 , 4 ]) # Coefficents and interval array\r\n \r\n for i in range( x_p.shape[0] - 1 ): # for every x[i], x[i+1] pair\r\n \r\n x_coeff = (y_p[i+1] - y_p[i]) / (x_p[i+1] - x_p[i])\r\n const = (x_p[i+1]*y_p[i] - x_p[i]*y_p[i+1] ) / (x_p[i+1] - x_p[i])\r\n \r\n # save the x coefficent, constant and the interval for this line\r\n f[i,:] = x_coeff, const, x_p[i], x_p[i+1]\r\n \r\n for a, b, start, end in f: # for every line fitted\r\n line_x = np.linspace( start, end, 3) # points to plot in x_range\r\n line_y = line_x * a + b # find the fitted line value at these points\r\n plt.plot(line_x,line_y,'k--', lw = 1, label = 'Linear' if a==f[0][0] else \"\") # only label one plot\r", "def interp_2d(_x, _y, _x_min, _x_step, _nx, _y_min, _y_step, _ny, _ar_f, _ord=3, _ix_per=1, _ix_ofst=0):\r\n if(_ord == 1): #bi-linear interpolation based on 4 points\r\n ix0 = int(trunc((_x - _x_min)/_x_step + 1.e-09))\r\n if(ix0 < 0):\r\n ix0 = 0\r\n elif(ix0 >= _nx - 1):\r\n ix0 = _nx - 2\r\n ix1 = ix0 + 1\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n \r\n iy0 = int(trunc((_y - _y_min)/_y_step + 1.e-09))\r\n if(iy0 < 0):\r\n iy0 = 0\r\n elif(iy0 >= _ny - 1):\r\n iy0 = _ny - 2\r\n iy1 = iy0 + 1\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n ix0_ix_per_p_ix_ofst = ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + 
ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = f10 - a00\r\n a01 = f01 - a00\r\n a11 = a00 - f01 - f10 + f11\r\n return a00 + tx*(a10 + ty*a11) + ty*a01\r\n\r\n elif(_ord == 2): #bi-quadratic interpolation based on 6 points\r\n ix0 = int(round((_x - _x_min)/_x_step))\r\n if(ix0 < 1):\r\n ix0 = 1\r\n elif(ix0 >= _nx - 1):\r\n ix0 = _nx - 2\r\n ixm1 = ix0 - 1\r\n ix1 = ix0 + 1\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n\r\n iy0 = int(round((_y - _y_min)/_y_step))\r\n if(iy0 < 1):\r\n iy0 = 1\r\n elif(iy0 >= _ny - 1):\r\n iy0 = _ny - 2\r\n iym1 = iy0 - 1\r\n iy1 = iy0 + 1\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iym1_nx_ix_per = iym1*nx_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n ixm1_ix_per_p_ix_ofst = ixm1*_ix_per + _ix_ofst\r\n ix0_ix_per_p_ix_ofst = ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n fm10 = _ar_f[iy0_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f0m1 = _ar_f[iym1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = 0.5*(f10 - fm10)\r\n a01 = 0.5*(f01 - f0m1)\r\n a11 = a00 - f01 - f10 + f11\r\n a20 = 0.5*(f10 + fm10) - a00\r\n a02 = 0.5*(f01 + f0m1) - a00\r\n return a00 + tx*(a10 + tx*a20 + ty*a11) + ty*(a01 + ty*a02)\r\n \r\n elif(_ord == 3): #bi-cubic interpolation based on 12 points\r\n ix0 = int(trunc((_x - _x_min)/_x_step + 1.e-09))\r\n if(ix0 < 1):\r\n ix0 = 1\r\n elif(ix0 >= _nx - 2):\r\n ix0 = _nx - 3\r\n ixm1 = ix0 - 1\r\n ix1 = ix0 + 1\r\n ix2 = ix0 + 2\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n\r\n iy0 = int(trunc((_y - _y_min)/_y_step + 1.e-09))\r\n if(iy0 < 1):\r\n iy0 = 1\r\n elif(iy0 >= _ny - 2):\r\n iy0 = _ny - 3\r\n iym1 = iy0 - 1\r\n iy1 = iy0 + 1\r\n iy2 = iy0 + 2\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iym1_nx_ix_per = iym1*nx_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n iy2_nx_ix_per = iy2*nx_ix_per\r\n ixm1_ix_per_p_ix_ofst = ixm1*_ix_per + _ix_ofst\r\n ix0_ix_per_p_ix_ofst = ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n ix2_ix_per_p_ix_ofst = ix2*_ix_per + _ix_ofst\r\n f0m1 = _ar_f[iym1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f1m1 = _ar_f[iym1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n fm10 = _ar_f[iy0_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f20 = _ar_f[iy0_nx_ix_per + ix2_ix_per_p_ix_ofst]\r\n fm11 = _ar_f[iy1_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f21 = _ar_f[iy1_nx_ix_per + ix2_ix_per_p_ix_ofst]\r\n f02 = _ar_f[iy2_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f12 = _ar_f[iy2_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = -0.5*a00 + f10 - f20/6 - fm10/3\r\n a01 = -0.5*a00 + f01 - f02/6 - f0m1/3\r\n a11 = -0.5*(f01 + f10) + (f02 - f12 + f20 - f21)/6 + (f0m1 - f1m1 + fm10 - fm11)/3 + f11\r\n a20 = -a00 + 0.5*(f10 + fm10)\r\n a02 = -a00 + 0.5*(f01 + f0m1)\r\n a21 = a00 - f01 + 0.5*(f11 - f10 - fm10 + fm11)\r\n a12 = a00 - f10 + 0.5*(f11 - f01 - f0m1 + f1m1)\r\n a30 = 0.5*(a00 - f10) + (f20 - fm10)/6\r\n a03 = 0.5*(a00 - f01) + (f02 - f0m1)/6\r\n a31 = 0.5*(f01 + f10 - f11 - a00) + (f21 + fm10 - f20 - 
fm11)/6\r\n a13 = 0.5*(f10 - f11 - a00 + f01) + (f0m1 + f12 - f02 - f1m1)/6\r\n return a00 + tx*(a10 + tx*(a20 + tx*(a30 + ty*a31) + ty*a21) + ty*a11) + ty*(a01 + ty*(a02 + ty*(a03 + tx*a13) + tx*a12))\r\n return 0", "def adapt_length_scale(self):\n Ne = max(1,self.Ne)\n Nc = max(1,self.Nc)\n ratio = Ne/(Ne+Nc)\n self.mu *= 2*ratio", "def _interpolate_move_pattern(move_pattern, new_size, min_size=10, max_size=1000):\n\n # interpolate movement pattern to fit a certain size by linear interpolation\n if new_size < min_size:\n new_size = min_size\n elif new_size > max_size:\n new_size = max_size\n\n if len(move_pattern) > new_size:\n int_size = new_size * len(move_pattern) - len(move_pattern) + 1\n else:\n int_size = new_size\n\n # linear interpolation\n x = np.linspace(0, len(move_pattern), len(move_pattern))\n new_x = np.linspace(0, len(move_pattern), int_size)\n interpolated_move_pattern = np.interp(new_x, x, move_pattern)\n\n # in the above described case, we have to reslice the enlarged vector.\n if len(move_pattern) > new_size:\n return interpolated_move_pattern[::len(move_pattern)]\n else:\n return interpolated_move_pattern", "def createIntegratedPsf(self):\n\n (wavelengths, weights) = self.filter\n for i in range(len(wavelengths)):\n\n wavelength = wavelengths[i]\n weight = weights[i]\n self.convertToOpd(wavelength) # creates self.opd\n opd = self.embedOpd()\n zf = numpy.fft.fft2(opd)\n del opd\n # Compute the amplitude squared.\n # (psf is not really the point spread function yet)\n psf = np.conjugate(zf)\n # psf will now be the point spread function, but still complex\n np.multiply(psf, zf, psf)\n del zf\n # normalize the PSF, and convert to single precision\n psf = psf.real / psf.size\n psf = psf.astype(np.float32)\n\n self.center(psf)\n\n # This describes the image scale if no resampling is done.\n cdelt_before_resampling = (wavelength * MICRONStoMETERS) / \\\n (self.D * self.oversample) * RADIANStoDEGREES\n if self.pixel_size is None:\n # we won't resample the output image\n self.cdelt = cdelt_before_resampling\n # Extract a subset.\n if self.output_size < self.npix:\n o_npix = self.output_size\n n0 = (self.npix - o_npix) // 2\n self.integrated_psf += \\\n (psf[n0:n0 + o_npix, n0:n0 + o_npix] * weight)\n else:\n self.integrated_psf += (psf * weight)\n else:\n # we'll resample to this image scale\n self.cdelt = self.pixel_size / self.oversample * ARCSECtoDEGREES\n # These three parameters are only used by mapPsf and for\n # normalizing the weight after resampling.\n self.rescale = self.cdelt / cdelt_before_resampling\n self.input_center = (self.npix + 1) // 2\n self.output_center = (self.output_size + 1) // 2\n sub_psf = np.zeros((self.output_size, self.output_size),\n dtype=np.float32)\n # Do the resampling, writing the output to sub_psf.\n ndimage.geometric_transform(psf, self.mapPsf,\n output_shape=(self.output_size, self.output_size),\n output=sub_psf, prefilter=True)\n weight = weight * self.rescale**2\n self.integrated_psf += (sub_psf * weight)\n del sub_psf\n\n if self.verbose:\n print(\"PSF for wavelength %g has been computed\" % wavelength)", "def interpolate(\n input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=False,\n):\n if size is not None:\n size = nest.flatten(size)\n if scale_factor is not None:\n scale_factor = nest.flatten(scale_factor)\n mode = mode.upper()\n mode = mode.replace('BILINEAR', 'LINEAR')\n mode = mode.replace('TRILINEAR', 'LINEAR')\n return FunctionLib.apply(\n 'Resize', input.device, [input],\n mode=mode, 
align_corners=align_corners,\n num_sizes=len(size) if size is not None else 0,\n num_scales=len(scale_factor) if scale_factor is not None else 0,\n sizes=size, scales=scale_factor)", "def interp_data(X, X_len, restore=False, interp_kind='linear'):\n \n [T, N, V] = X.shape\n X_new = np.zeros_like(X)\n \n # restore original lengths\n if restore:\n for n in range(N):\n t = np.linspace(start=0, stop=X_len[n], num=T)\n t_new = np.linspace(start=0, stop=X_len[n], num=X_len[n])\n for v in range(V):\n x_n_v = X[:,n,v]\n f = interpolate.interp1d(t, x_n_v, kind=interp_kind)\n X_new[:X_len[n],n,v] = f(t_new)\n \n # interpolate all data to length T \n else:\n for n in range(N):\n t = np.linspace(start=0, stop=X_len[n], num=X_len[n])\n t_new = np.linspace(start=0, stop=X_len[n], num=T)\n for v in range(V):\n x_n_v = X[:X_len[n],n,v]\n f = interpolate.interp1d(t, x_n_v, kind=interp_kind)\n X_new[:,n,v] = f(t_new)\n \n return X_new", "def polynomialInterpolation(self,s):\n #print(s)\n #s[i]=xi,s[j]=xj\n return Polynomial.createFromInterpolation(s,range(len(s)))\n #return Polynomial(s,T)", "def cubicSpline(x,y,x_int):\n\n #region \"learn\" the coefficients of the cubic polynomials that interpolate intervals in x.\n # amount of intervals/splines\n n = len(x)-1\n\n # a_i = y_i\n a = y[:-1]\n\n # h_i = x_{i+1} - x_i for i in 0..n-1\n h = x[1:]-x[:-1]\n\n # 2 * h_i + h_{i+1}\n diagA = 2*(h[1:]+h[:-1])\n \n # h_1..h_n-2\n hInA = h[1:-1]\n\n A = np.eye(n-1)*diagA\n # distribute h_1..h_n-2 above and underneath the diagonal\n A += np.diag(hInA,1)\n A += np.diag(hInA,-1)\n\n # construct RHS\n z = 3/h[1:] * (y[2:] - y[1:-1]) - 3/h[:-1] * (y[1:-1] - y[:-2])\n\n # c_0 = c_{n} = 0\n c = np.zeros(n+1)\n\n c[1:-1] = np.linalg.solve(A,z)\n \n b = (y[1:]-y[:-1])/h - h/3*(c[1:] + 2*c[:-1])\n\n d = 1/(3*h)*(c[1:]-c[:-1])\n #endregion\n\n #region interpolate all points in x_int\n y_int = x_int.copy()\n # for all intervals\n for i in range(len(x)-1):\n # find points to interpolate within given interval\n idx = np.where(np.logical_and(x[i]<= x_int,x_int < x[i+1]))[0]\n xx = x_int[idx]\n yy = np.polyval(np.array([d[i],c[i],b[i],a[i]]), xx-x[i])\n y_int[idx] = yy\n print(f'interpolating in interval [{x[i]},{x[i+1]}[')\n print(xx)\n print(yy)\n print('\\n')\n\n # edgecase where x_int contains exactly last interval border\n #find indicies if x_int contains dupes\n idx = np.where(x_int == x[len(x)-1])[0] \n # interpolate with last interval polynomial\n i = len(a)-1\n y_int[idx] = np.polyval(np.array([d[i],c[i],b[i],a[i]]), x_int[idx]-x[i])\n #endregion\n return y_int", "def smoothL1(input_flow, target_flow, size_average = True):\n target_valid = (target_flow < 192) & (target_flow > 0) \n return F.smooth_l1_loss(input_flow[target_valid], target_flow[target_valid], size_average=size_average)", "def interpolate(i0, d0, i1, d1):\n if i0 == i1:\n return [d0]\n values = []\n a = (d1 - d0) / (i1 - i0)\n d = d0\n for i in range(i0,i1+1):\n values.append(d)\n d = d + a\n return values", "def downsampleShape(self, numDesiredPoints):\n\n if len(self.x) > 2:\n t_current_x = np.linspace(0, 1, len(self.x))\n t_current_y = np.linspace(0, 1, len(self.y))\n t_desired_x = np.linspace(0, 1, numDesiredPoints)\n t_desired_y = np.linspace(0, 1, numDesiredPoints)\n f = interpolate.interp1d(t_current_x, self.x, kind='linear')\n self.x = f(t_desired_x).tolist()\n f = interpolate.interp1d(t_current_y, self.y, kind='linear')\n self.y = f(t_desired_y).tolist()\n\n self.len = numDesiredPoints", "def fp_wavelength_sol_new(p, loc):\n func_name = __NAME__ + 
'.fp_wavelength_sol_new()'\n # get parameters from p\n dopd0 = p['IC_FP_DOPD0']\n fit_deg = p['IC_FP_FIT_DEGREE']\n fp_large_jump = p['IC_FP_LARGE_JUMP']\n n_ord_start_fp = p['IC_FP_N_ORD_START']\n n_ord_final_fp = p['IC_FP_N_ORD_FINAL']\n cm_ind = p['IC_WAVE_FP_CM_IND']\n\n # find FP lines\n loc = find_fp_lines_new(p, loc)\n all_lines_2 = loc['ALL_LINES_2']\n # set up storage\n llpos_all, xxpos_all, ampl_all = [], [], []\n m_fp_all, weight_bl_all, order_rec_all, dopd_all = [], [], [], []\n ll_prev, m_prev = np.array([]), np.array([])\n # loop through the orders from red to blue\n for order_num in range(n_ord_final_fp, n_ord_start_fp - 1, -1):\n # define storage\n floc = dict()\n # select the lines in the order\n gg = loc['ORDPEAK'] == order_num\n # store the initial wavelengths of the lines\n # floc['llpos'] = np.polynomial.chebyshev.chebval(\n # loc['XPEAK'][gg],\n # loc['LITTROW_EXTRAP_PARAM_1'][order_num])\n floc['llpos'] = np.polyval(\n loc['LITTROW_EXTRAP_PARAM_1'][order_num][::-1],\n loc['XPEAK'][gg])\n # store the pixel positions of the lines\n floc['xxpos'] = loc['XPEAK'][gg]\n # get the median pixel difference between successive lines\n # (to check for gaps)\n xxpos_diff_med = np.nanmedian(floc['xxpos'][1:] - floc['xxpos'][:-1])\n # store the amplitudes of the lines\n floc['ampl'] = loc['AMPPEAK'][gg]\n # store the values of the blaze at the pixel positions of the lines\n floc['weight_bl'] = np.zeros_like(floc['llpos'])\n # get and normalize blaze for the order\n nblaze = loc['BLAZE'][order_num] / np.nanmax(loc['BLAZE'][order_num])\n for it in range(1, len(floc['llpos'])):\n floc['weight_bl'][it] = nblaze[int(np.round(floc['xxpos'][it]))]\n # store the order numbers\n floc['order_rec'] = loc['ORDPEAK'][gg]\n # set up storage for line numbers\n mpeak = np.zeros_like(floc['llpos'])\n # line number for the last (reddest) line of the order (by FP equation)\n mpeak[-1] = int(dopd0 / floc['llpos'][-1])\n # calculate successive line numbers\n for it in range(len(floc['llpos']) - 2, -1, -1):\n # check for gap in x positions\n flocdiff = floc['xxpos'][it + 1] - floc['xxpos'][it]\n lowcond = xxpos_diff_med - (0.25 * xxpos_diff_med)\n highcond = xxpos_diff_med + (0.25 * xxpos_diff_med)\n if lowcond < flocdiff < highcond:\n # no gap: add 1 to line number of previous line\n mpeak[it] = mpeak[it + 1] + 1\n # if there is a gap, fix it\n else:\n # get line x positions\n flocx0 = floc['xxpos'][it]\n flocx1 = floc['xxpos'][it + 1]\n # get line wavelengths\n floc0 = floc['llpos'][it]\n floc1 = floc['llpos'][it + 1]\n # estimate the number of peaks missed\n m_offset = int(np.round((flocx1 - flocx0) / xxpos_diff_med))\n # add to m of previous peak\n mpeak[it] = mpeak[it + 1] + m_offset\n # verify there's no dopd jump, fix if present\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n # do loops to check jumps\n if dopd_1 - dopd_2 > fp_large_jump:\n while (dopd_1 - dopd_2) > fp_large_jump:\n mpeak[it] = mpeak[it] - 1\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n elif dopd_1 - dopd_2 < -fp_large_jump:\n while (dopd_1 - dopd_2) < -fp_large_jump:\n mpeak[it] = mpeak[it] + 1\n dopd_1 = (mpeak[it] * floc0 - dopd0) * 1.e-3\n dopd_2 = (mpeak[it + 1] * floc1 - dopd0) * 1.e-3\n # determination of observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n # for orders other than the reddest, attempt to cross-match\n if order_num != 
n_ord_final_fp:\n # check for overlap\n if floc['llpos'][cm_ind] > ll_prev[0]:\n # find closest peak in overlap and get its m value\n ind = np.abs(ll_prev - floc['llpos'][cm_ind]).argmin()\n # the peak matching the reddest may not always be found!!\n # define maximum permitted difference\n llpos_diff_med = np.nanmedian(\n floc['llpos'][1:] - floc['llpos'][:-1])\n # print(llpos_diff_med)\n # print(abs(ll_prev[ind] - floc['llpos'][-1]))\n # check if the difference is over the limit\n if abs(ll_prev[ind] - floc['llpos'][-1]) > 1.5 * llpos_diff_med:\n # print('overlap line not matched')\n ll_diff = ll_prev[ind] - floc['llpos'][-1]\n ind2 = -2\n # loop over next reddest peak until they match\n while ll_diff > 1.5 * llpos_diff_med:\n # check there is still overlap\n if floc['llpos'][ind2] > ll_prev[0]:\n ind = np.abs(ll_prev - floc['llpos'][ind2]).argmin()\n ll_diff = ll_prev[ind] - floc['llpos'][ind2]\n ind2 -= 1\n else:\n break\n m_match = m_prev[ind]\n # save previous mpeak calculated\n m_init = mpeak[cm_ind]\n # recalculate m if there's an offset from cross_match\n m_offset_c = m_match - m_init\n if m_offset_c != 0:\n mpeak = mpeak + m_offset_c\n # print note for dev if different\n if p['DRS_DEBUG']:\n wargs = [order_num, m_match - m_init]\n wmsg = 'M difference for order {0}: {1}'\n WLOG(p, '', wmsg.format(*wargs))\n # recalculate observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store new m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n else:\n wmsg = 'No overlap for order {0}'\n WLOG(p, 'warning', wmsg.format(order_num))\n # save previous mpeak calculated\n m_init = mpeak[cm_ind]\n m_test = mpeak[cm_ind]\n # get dopd for last line of current & first of last order\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0) * 1.e-3\n dopd_prev = (m_prev[0] * ll_prev[0] - dopd0) * 1.e-3\n # do loops to check jumps\n if dopd_curr - dopd_prev > fp_large_jump:\n while (dopd_curr - dopd_prev) > fp_large_jump:\n m_test = m_test - 1\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0)\n dopd_curr = dopd_curr * 1.e-3\n elif dopd_curr - dopd_prev < -fp_large_jump:\n while (dopd_curr - dopd_prev) < -fp_large_jump:\n m_test = m_test + 1\n dopd_curr = (m_test * floc['llpos'][cm_ind] - dopd0)\n dopd_curr = dopd_curr * 1.e-3\n # recalculate m if there's an offset from cross_match\n m_offset_c = m_test - m_init\n if m_offset_c != 0:\n mpeak = mpeak + m_offset_c\n # print note for dev if different\n if p['DRS_DEBUG']:\n wargs = [order_num, mpeak[cm_ind] - m_init]\n wmsg = 'M difference for order {0}: {1}'\n WLOG(p, '', wmsg.format(*wargs))\n # recalculate observed effective cavity width\n dopd_t = mpeak * floc['llpos']\n # store new m and d\n floc['m_fp'] = mpeak\n floc['dopd_t'] = dopd_t\n\n # add to storage\n llpos_all += list(floc['llpos'])\n xxpos_all += list(floc['xxpos'])\n ampl_all += list(floc['ampl'])\n m_fp_all += list(floc['m_fp'])\n weight_bl_all += list(floc['weight_bl'])\n order_rec_all += list(floc['order_rec'])\n # difference in cavity width converted to microns\n dopd_all += list((floc['dopd_t'] - dopd0) * 1.e-3)\n # save numpy arrays of current order to be previous in next loop\n ll_prev = np.array(floc['llpos'])\n m_prev = np.array(floc['m_fp'])\n\n # convert to numpy arrays\n llpos_all = np.array(llpos_all)\n xxpos_all = np.array(xxpos_all)\n ampl_all = np.array(ampl_all)\n m_fp_all = np.array(m_fp_all)\n weight_bl_all = np.array(weight_bl_all)\n order_rec_all = np.array(order_rec_all)\n dopd_all = np.array(dopd_all)\n\n # fit a polynomial to line number v 
measured difference in cavity\n # width, weighted by blaze\n with warnings.catch_warnings(record=True) as w:\n coeffs = nanpolyfit(m_fp_all, dopd_all, fit_deg, w=weight_bl_all)[::-1]\n spirouCore.WarnLog(p, w, funcname=func_name)\n # get the values of the fitted cavity width difference\n cfit = np.polyval(coeffs[::-1], m_fp_all)\n # update line wavelengths using the new cavity width fit\n newll = (dopd0 + cfit * 1000.) / m_fp_all\n # insert fp lines into all_lines2 (at the correct positions)\n all_lines_2 = insert_fp_lines(p, newll, llpos_all, all_lines_2,\n order_rec_all, xxpos_all, ampl_all)\n\n # add to loc\n loc['FP_LL_POS'] = llpos_all\n loc['FP_XX_POS'] = xxpos_all\n loc['FP_M'] = m_fp_all\n loc['FP_DOPD_OFFSET'] = dopd_all\n loc['FP_AMPL'] = ampl_all\n loc['FP_LL_POS_NEW'] = newll\n loc['ALL_LINES_2'] = all_lines_2\n loc['FP_DOPD_OFFSET_COEFF'] = coeffs\n loc['FP_DOPD_OFFSET_FIT'] = cfit\n loc['FP_ORD_REC'] = order_rec_all\n # set sources\n sources = ['FP_LL_POS', 'FP_XX_POS', 'FP_M', 'FP_DOPD_OFFSET',\n 'FP_AMPL', 'FP_LL_POS_NEW', 'ALL_LINES_2',\n 'FP_DOPD_OFFSET_COEFF', 'FP_DOPD_OFFSET_FIT', 'FP_ORD_REC']\n loc.set_sources(sources, func_name)\n\n return loc", "def interpolate_bigger(arrayin,ny,nx=None):\r\n if nx == None :\r\n nx = ny\r\n arrayout = np.array(arrayin,dtype=np.complex128)\r\n arrayout = fft2(arrayout)\r\n arrayout = padd(arrayout,ny,nx)\r\n arrayout = ifft2(arrayout)\r\n return np.array(arrayout,dtype=arrayin.dtype)", "def test_1d_linear_interpolation_basic(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n\n # Define array with corresponding values\n A = numpy.zeros((len(x)))\n\n # Define values for each xas a linear function\n for i in range(len(x)):\n A[i] = linear_function(x[i], 0)\n\n # Test first that original points are reproduced correctly\n for i, xi in enumerate(x):\n val = interpolate1d(x, A, [xi], mode='linear')[0]\n ref = linear_function(xi, 0)\n assert numpy.allclose(val, ref, rtol=1e-12, atol=1e-12)\n\n # Then test that genuinly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 10)\n points = xis\n\n vals = interpolate1d(x, A, points, mode='linear')\n refs = linear_function(points, 0)\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n # Exercise bounds_error flag\n vals = interpolate1d(x, A, points, mode='linear',\n bounds_error=True)\n refs = linear_function(points, 0)\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def _linear_wcs_fit(params, lon, lat, x, y, w_obj): # pragma: no cover\n cd = params[0:4]\n crpix = params[4:6]\n\n w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))\n w_obj.wcs.crpix = crpix\n lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)\n\n resids = np.concatenate((lon-lon2, lat-lat2))\n resids[resids > 180] = 360 - resids[resids > 180]\n resids[resids < -180] = 360\t+ resids[resids < -180]\n\n return resids", "def datasetratiocopy_xr_extend(l,ratio,x_offset,y_offset):#只延伸上下两边以及右边的点\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01: \r\n if pos_x>0: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset\r\n else:\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n 
pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def interpolate(self, s1, s2, num_points):\n edge = [tuple(s1)]\n length = self.distance_bw_states(s1, s2)\n d_l = length/num_points\n curr_len = d_l\n while True:\n #Do linear interpolation\n temp = [s1[i] + (s2[i] - s1[i])*(curr_len/length) for i in range(self.ndims)]\n #Update curr_len\n curr_len+= d_l\n if curr_len > length: break\n #Append temp to edge\n edge.append(tuple(temp))\n #Add final state to edge\n edge.append(tuple(s2))\n return edge", "def RecursiveLowPassFast(signal, coeff, self):\n # Creates running mean value of the input\n ml = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], signal) \n # Plot Running threshold value at the current plot\n self.p1.plot(self.t, ml, pen=pg.mkPen(color=(246, 178, 255), width=3))\n\n # Creates running square deviation from the mean\n vl = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], np.square(signal - ml))\n # Creates \"threshold line\". If current value < sl[i] -> i belongs to event. \n sl = ml - coeff['S'] * np.sqrt(vl)\n self.p1.plot(self.t, sl, pen=pg.mkPen(color=(173, 27, 183), width=3))\n # Finds the length of the initial signal\n Ni = len(signal)\n # Finds those points where signal less than \"threshold line\"\n points = np.array(np.where(signal<=sl)[0])\n to_pop=np.array([]) # Empty supplementary array for finding adjacent points \n # For loop for finding adjacent points \n for i in range(1,len(points)):\n if points[i] - points[i - 1] == 1:\n to_pop=np.append(to_pop, i)\n # Points contain only border points of events\n points = np.delete(points, to_pop)\n # Empty list for Event location storage\n RoughEventLocations = []\n NumberOfEvents=0 #Number of events\n\n # For Loop for finding separating edges of different events and satisfying Event length limits\n for i in points:\n if NumberOfEvents is not 0:\n if i >= RoughEventLocations[NumberOfEvents-1][0] and i <= RoughEventLocations[NumberOfEvents-1][1]:\n continue\n NumberOfEvents += 1\n start = i\n El = ml[i] - coeff['E'] * np.sqrt(vl[i])\n Mm = ml[i]\n Vv = vl[i]\n duration = 0\n while signal[i + 1] < El and i < (Ni - 2) and duration < coeff['eventlengthLimit']:\n duration += 1\n i += 1\n if duration >= coeff['eventlengthLimit'] or i > (Ni - 10):\n NumberOfEvents -= 1\n else:\n k = start\n while signal[k] < Mm and k > 1:\n k -= 1\n start = k - 1\n k2 = i + 1\n while signal[k2] > Mm:\n k2 -= 1\n endp = k2\n if start<0:\n start=0\n RoughEventLocations.append((start, endp, ml[start], vl[start]))\n\n return np.array(RoughEventLocations)", "def interpolV(y, x, newX):\r\n \r\n num = len(x)\r\n #if (num != len(y)):\r\n #//System.out.println(\"Toolbox.interpolV(): Old x and y must be same length\"); \r\n \r\n newNum = len(newX)\r\n #//System.out.println(\"interpolV: newNum \" + newNum + \" num \" + num); \r\n #newY = [0.0 for i in range(newNum)]\r\n\r\n#//Renormalize ordinates:\r\n \r\n iMinAndMax = minMax(y)\r\n norm = y[iMinAndMax[1]]\r\n #//System.out.println(\"norm \" + norm);\r\n #yNorm = [0.0 for i in range(num)]\r\n newYNorm = [0.0 for i in range(newNum)] \r\n #for i in range(num):\r\n # yNorm[i] = y[i] / norm \r\n yNorm = [ x / norm for x in y ]\r\n\r\n#// Set any newX elements that are *less than* the first x element to th first \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n start = 0\r\n for i in range(newNum):\r\n 
if (newX[i] <= x[1]):\r\n newYNorm[i] = yNorm[0]\r\n start += 1\r\n \r\n if (newX[i] > x[1]):\r\n break\r\n \r\n \r\n#//System.out.println(\"start \" + start);\r\n#//System.out.println(\"x[0] \" + x[0] + \" x[1] \" + x[1] + \" newX[start] \" + newX[start]);\r\n#double jWght, jm1Wght, denom;\r\n\r\n\r\n if (start < newNum-1):\r\n\r\n j = 1 #//initialize old abscissae index\r\n #//outer loop over new abscissae\r\n for i in range(start, newNum):\r\n\r\n #//System.out.println(\"i \" + i + \" j \" + j);\r\n\r\n#// break out if current element newX is *greater* that last x element\r\n if ( (newX[i] > x[num-1]) or (j > (num-1)) ):\r\n break \r\n \r\n\r\n while (x[j] < newX[i]): \r\n j += 1\r\n \r\n #//System.out.println(\"i \" + i + \" newX[i] \" + newX[i] + \" j \" + j + \" x[j-1] \" + x[j-1] + \" x[j] \" + x[j]);\r\n #//1st order Lagrange method:\r\n jWght = newX[i] * (1.0 - (x[j-1]/newX[i])) #//(newX[i]-x[j-1])\r\n jm1Wght = x[j] * (1.0 - (newX[i]/x[j])) #//(x[j]-newX[i])\r\n denom = x[j] * (1.0 - (x[j-1]/x[j])) #//(x[j]-x[j-1])\r\n jWght = jWght / denom\r\n jm1Wght = jm1Wght / denom\r\n #//newYNorm[i] = (yNorm[j]*(newX[i]-x[j-1])) + (yNorm[j-1]*(x[j]-newX[i]));\r\n newYNorm[i] = (yNorm[j]*jWght) + (yNorm[j-1]*jm1Wght)\r\n #//System.out.println(\"i \" + i + \" newYNorm[i] \" + newYNorm[i] + \" j \" + j + \" yNorm[j-1] \" + yNorm[j-1] + \" yNorm[j] \" + yNorm[j]);\r\n \r\n\r\n#// Set any newX elements that are *greater than* the first x element to the last \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n for i in range(newNum):\r\n if (newX[i] >= x[num-1]):\r\n newYNorm[i] = yNorm[num-1]\r\n \r\n \r\n\r\n #//Restore orinate scale\r\n #for i in range(newNum):\r\n # newY[i] = newYNorm[i] * norm \r\n newY = [ x * norm for x in newYNorm ]\r\n\r\n\r\n return newY", "def polylinedictarraycopy(d):#d——原始图层多段线字典 \r\n dictlist=[]\r\n ratiolist=[] #放缩率列表\r\n rationumaccumulationlist=[] #放缩率数量累加列表\r\n \r\n eachrationum=globalconfig.X_ARRAY_NUM//globalconfig.RATIO_NUM\r\n leftrationum=globalconfig.X_ARRAY_NUM%globalconfig.RATIO_NUM\r\n \r\n eachrationumlist=[eachrationum]*globalconfig.RATIO_NUM #各个放缩率对应数量的列表\r\n \r\n for i in range((globalconfig.RATIO_NUM-1)//2-(leftrationum-1)//2,(globalconfig.RATIO_NUM-1)//2-(leftrationum-1)//2+leftrationum):\r\n eachrationumlist[i]=eachrationumlist[i]+1 #将整除后的余值加入到靠中间放缩率的方案中。\r\n \r\n rationumaccumulationlist.append(0) \r\n \r\n for i in range(1,globalconfig.RATIO_NUM): #计算放缩率数量累加列表\r\n rationumaccumulationlist.append(rationumaccumulationlist[i-1]+eachrationumlist[i-1])\r\n \r\n for i in range(0,globalconfig.RATIO_NUM): #计算放缩率列表\r\n ratiolist.append((globalconfig.CENTER_RATIO-((globalconfig.RATIO_NUM+1)//2-1)*globalconfig.RATIO_DIFF)+i*globalconfig.RATIO_DIFF) \r\n \r\n for i in range(0,globalconfig.RATIO_NUM): #每种放缩率\r\n for j in range(0,eachrationumlist[i]): #每种放缩率对应数量\r\n newdict={}\r\n for e in d: #将字典中值即每一图层对应的多段线列表进行复制并移动到指定位置\r\n newdict[e]=polylinedatasetarraycopy(d[e],ratiolist[i],globalconfig.CUTLINE_X_OFFSET+globalconfig.X_BLANK+(rationumaccumulationlist[i]+j+0.5)*globalconfig.X_LENGTH/globalconfig.CENTER_RATIO,globalconfig.CUTLINE_Y_OFFSET+globalconfig.Y_BLANK+0.5*globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO,e,len(dictlist)) \r\n #newdict.append([e,polylinedatasetarraycopy(d[e],ratiolist[i],globalconfig.CUTLINE_X_OFFSET+globalconfig.X_BLANK+(rationumaccumulationlist[i]+j+0.5)*globalconfig.X_LENGTH/globalconfig.CENTER_RATIO,globalconfig.CUTLINE_Y_OFFSET+globalconfig.Y_BLANK+0.5*globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO,e,len(dictlist))])\r\n 
dictlist.append(newdict) \r\n return (dictlist,ratiolist,eachrationumlist)", "def upsample_bilinear(input, size=None, scale_factor=None):\n return interpolate(input, size, scale_factor, 'linear', align_corners=True)", "def interpolate(self, image):\n return", "def _interpolate_spectrum(sp1, sp2, par):\n spectrum1 = sp1.pop()\n spectrum2 = sp2.pop()\n par1 = sp1.pop()\n par2 = sp2.pop()\n\n if par1 == par2:\n sp = spectrum1\n else:\n a = (par1 - par) / (par1 - par2)\n b = 1.0 - a\n sp = a * spectrum2 + b * spectrum1\n\n result = [member for member in sp1]\n result.append(sp)\n\n return result", "def get_interpolator(self, interpolatortype='FDI', nelements=1e4,\n buffer=0.2, element_volume = None, **kwargs):\n # get an interpolator for\n interpolator = None\n bb = np.copy(self.bounding_box)\n # add a buffer to the interpolation domain, this is necessary for\n # faults but also generally a good\n # idea to avoid boundary problems\n # buffer = bb[1, :]\n buffer = (np.min(bb[1,:]-bb[0,:]))*buffer\n bb[0, :] -= buffer # *(bb[1,:]-bb[0,:])\n bb[1, :] += buffer # *(bb[1,:]-bb[0,:])\n box_vol = (bb[1, 0]-bb[0, 0]) * (bb[1, 1]-bb[0, 1]) * (bb[1, 2]-bb[0, 2])\n if interpolatortype == \"PLI\":\n if element_volume is None:\n # nelements /= 5\n element_volume = box_vol / nelements\n # calculate the step vector of a regular cube\n step_vector = np.zeros(3)\n step_vector[:] = element_volume ** (1. / 3.)\n # step_vector /= np.array([1,1,2])\n # number of steps is the length of the box / step vector\n nsteps = np.ceil((bb[1, :] - bb[0, :]) / step_vector).astype(int)\n if np.any(np.less(nsteps, 3)):\n axis_labels = ['x','y','z']\n for i in range(3):\n if nsteps[i] < 3:\n logger.error(\"Number of steps in direction {} is too small, try increasing nelements\".format(axis_labels[i]))\n logger.error(\"Cannot create interpolator: number of steps is too small\")\n raise ValueError(\"Number of steps too small cannot create interpolator\")\n # create a structured grid using the origin and number of steps\n if self.reuse_supports:\n mesh_id = 'mesh_{}'.format(nelements)\n mesh = self.support.get(mesh_id,\n TetMesh(origin=bb[0, :], nsteps=nsteps,\n step_vector=step_vector))\n if mesh_id not in self.support:\n self.support[mesh_id] = mesh\n else:\n if 'meshbuilder' in kwargs:\n mesh = kwargs['meshbuilder'](bb,nelements)\n else:\n mesh = TetMesh(origin=bb[0, :], nsteps=nsteps, step_vector=step_vector)\n logger.info(\"Creating regular tetrahedron mesh with %i elements \\n\"\n \"for modelling using PLI\" % (mesh.ntetra))\n\n return PLI(mesh)\n\n if interpolatortype == 'FDI':\n\n # find the volume of one element\n if element_volume is None:\n element_volume = box_vol / nelements\n # calculate the step vector of a regular cube\n step_vector = np.zeros(3)\n step_vector[:] = element_volume ** (1. 
/ 3.)\n # number of steps is the length of the box / step vector\n nsteps = np.ceil((bb[1, :] - bb[0, :]) / step_vector).astype(int)\n if np.any(np.less(nsteps, 3)):\n logger.error(\"Cannot create interpolator: number of steps is too small\")\n axis_labels = ['x','y','z']\n for i in range(3):\n if nsteps[i] < 3:\n logger.error(\"Number of steps in direction {} is too small, try increasing nelements\".format(axis_labels[i]))\n raise ValueError(\"Number of steps too small cannot create interpolator\")\n # create a structured grid using the origin and number of steps\n if self.reuse_supports:\n grid_id = 'grid_{}'.format(nelements)\n grid = self.support.get(grid_id, StructuredGrid(origin=bb[0, :],\n nsteps=nsteps,\n step_vector=step_vector))\n if grid_id not in self.support:\n self.support[grid_id] = grid\n else:\n grid = StructuredGrid(origin=bb[0, :], nsteps=nsteps,step_vector=step_vector)\n logger.info(\"Creating regular grid with %i elements \\n\"\n \"for modelling using FDI\" % grid.n_elements)\n return FDI(grid)\n\n if interpolatortype == \"DFI\": # \"fold\" in kwargs:\n if element_volume is None:\n nelements /= 5\n element_volume = box_vol / nelements\n # calculate the step vector of a regular cube\n step_vector = np.zeros(3)\n step_vector[:] = element_volume ** (1. / 3.)\n # number of steps is the length of the box / step vector\n nsteps = np.ceil((bb[1, :] - bb[0, :]) / step_vector).astype(int)\n # create a structured grid using the origin and number of steps\n if 'meshbuilder' in kwargs:\n mesh = kwargs['meshbuilder'].build(bb,nelements)\n else:\n mesh = kwargs.get('mesh', TetMesh(origin=bb[0, :], nsteps=nsteps,\n step_vector=step_vector))\n logger.info(\"Creating regular tetrahedron mesh with %i elements \\n\"\n \"for modelling using DFI\" % mesh.ntetra)\n return DFI(mesh, kwargs['fold'])\n if interpolatortype == 'Surfe' or interpolatortype == 'surfe':\n # move import of surfe to where we actually try and use it\n try:\n from LoopStructural.interpolators.surfe_wrapper import \\\n SurfeRBFInterpolator as Surfe\n\n surfe = True\n\n except ImportError:\n surfe = False\n if not surfe:\n logger.warning(\"Cannot import Surfe, try another interpolator\")\n raise ImportError('Cannot import surfepy, try pip install surfe')\n method = kwargs.get('method', 'single_surface')\n logger.info(\"Using surfe interpolator\")\n return Surfe(method)\n logger.warning(\"No interpolator\")\n raise InterpolatorError(\"Could not create interpolator\")", "def GetScaleBlocks(width):\n\n rord=numpy.log10(abs(width)/2.0)\n nrord=rord % 1\n\n if nrord < numpy.log10(2):\n spc=0.2*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n elif nrord < numpy.log10(5):\n spc=0.5*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4]\n else:\n spc=pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=spc*5\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n\n if len(newspc) == 5:\n #labels=['0',None,\"%g\" % smallspc*2,None,\"%g\" % (smallspc*4)]\n labels=['0',None,None,None,\"%g\" % (smallspc*4)]\n else:\n labels=['0',None,None,None,None,\"%g\" % (smallspc*5)]\n\n temp_max=newspc[len(newspc)-1]\n start=temp_max\n for temp in numpy.arange(start,width-bigspc/2,bigspc):\n temp_max=temp_max+bigspc\n newspc.append(temp_max)\n labels.append(\"%g\" % temp_max)\n\n #start=temp_max\n #for temp in Numeric.arange(start,width-smallspc/2,smallspc):\n # labels.append(None)\n # 
temp_max=temp_max+smallspc \n # newspc.append(temp_max) \n\n return (numpy.array(newspc,numpy.float32),labels)", "def match_wl(wl, spec, ref_wl, method=\"scipy\", kind=\"linear\"):\n starttime = time.time()\n if method == \"scipy\":\n #print(kind + \" scipy interpolation\")\n linear_interp = interp1d(wl, spec, kind=kind)\n new_spec = linear_interp(ref_wl)\n elif method == \"numpy\":\n if kind.lower() is not \"linear\":\n print(\"Warning: Cannot do \" + kind + \" interpolation with numpy, switching to linear\" )\n #print(\"Linear numpy interpolation\")\n new_spec = np.interp(ref_wl, wl, spec) # 1-d peicewise linear interpolat\n else:\n print(\"Method was given as \" + method)\n raise(\"Not correct interpolation method specified\")\n #print(\"Interpolation Time = \" + str(time.time() - starttime) + \" seconds\")\n\n return new_spec # test inperpolations ", "def interpolate_lightcurve(light_curve, samples_per_frame_time, frame_time):\n time_units = light_curve['times'].unit\n flux_units = light_curve['fluxes'].unit\n divisor = samples_per_frame_time - 1.\n points = np.arange(light_curve['times'][0].value, light_curve['times'][-1].value, frame_time/divisor)\n light_curve[\"fluxes\"] = np.interp(points, light_curve['times'].value, light_curve['fluxes'].value) * flux_units\n light_curve[\"times\"] = points * time_units\n return light_curve", "def power_points():\n next_reading = power_readings()\n stretch = []\n\n def next():\n nonlocal stretch, next_reading\n stretch.append(next_reading())\n if len(stretch) > XMAX + 1:\n stretch.pop(0)\n x = XMAX + 1 - len(stretch)\n points = []\n for y in stretch:\n points.append((x, y))\n points.append((x, 0))\n x += 1\n return points\n\n return next", "def perform_point_interpolation(sub_sample_wvl, sub_sample_rad, center_wv):\n # let us define spectral resolution\n\n print(center_wv)\n dframe = pd.DataFrame()\n\n sampled_wvl = np.arange(min(sub_sample_wvl), max(sub_sample_wvl), 2)\n fit_params = interp1d(sub_sample_wvl, sub_sample_rad, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n dframe['wavelength'] = sampled_wvl\n dframe['rad'] = fitted_val\n return dframe", "def forward(self, LL, LH, HL, HH):\n assert len(LL.size()) == len(LH.size()) == len(\n HL.size()) == len(HH.size()) == 4\n self.input_height = LL.size()[-2] + HH.size()[-2]\n self.input_width = LL.size()[-1] + HH.size()[-1]\n self.get_matrix()\n return IDWTFunction_2D.apply(LL, LH, HL, HH, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0, self.matrix_high_1)", "def spline(bin_size, offsets):\n x, y = zip(*offsets)\n xs = np.arange(x[0], x[-1], 120)\n\n # array of knots (with the start and end which is addes automatically 13 knots meanaing 12 pieces)\n t = [offsets[0][0] + bin_size, offsets[0][0] + bin_size * 2,\n offsets[0][0] + 3 * bin_size, offsets[0][0] + 4 * bin_size,\n offsets[0][0] + 5 * bin_size, offsets[0][0] + 6 * bin_size,\n offsets[0][0] + 7 * bin_size, offsets[0][0] + 8 * bin_size,\n offsets[0][0] + 9 * bin_size, offsets[0][0] + 10 * bin_size,\n offsets[0][0] + 11 * bin_size]\n\n # compute a spline polynomial of degree 3 over 5 equal pieces for the y points over steps of 1 sec on the x axis.\n try:\n spl = LSQUnivariateSpline(x, y, t, w=None, bbox=[None, None], k=3)\n except ValueError as e:\n logging.error(\"ERROR: LSQUnivariateSpline ValueError failed with error {} and params x {} y {} t {} \".format(e, x, y, t))\n raise\n spl_deriv = spl.derivative(1) # derivative of degree one\n orig_curve = spl(xs)\n deriv_curve = spl_deriv(xs)\n\n return orig_curve, deriv_curve, 
xs", "def InterpolationDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def interpolate_matrix(matrix):", "def linear_int(x, y, mode=\"interp1d\"):\n if mode == \"interp1d\":\n fit = interp1d(x, y, fill_value=\"extrapolate\")\n else:\n params = n_ord_interp(x, y)\n fit = np.poly1d(params)\n\n x = np.arange(0, 2400)\n\n return fit(x)", "def discretized_line(x_start, y_start, x_end, y_end, n_elements):\n n_pts = n_elements + 1\n x = np.linspace(x_start, x_end, n_pts)\n y = np.linspace(y_start, y_end, n_pts)\n x1 = x[:-1]\n y1 = y[:-1]\n x2 = x[1:]\n y2 = y[1:]\n return x1, y1, x2, y2", "def ex2d(image, ivar, psf, specrange, wavelengths, xyrange=None,\n full_output=False, regularize=0.0):\n\n #- Range of image to consider\n waverange = (wavelengths[0], wavelengths[-1])\n \n if xyrange is None:\n xmin, xmax, ymin, ymax = xyrange = psf.xyrange(specrange, waverange)\n image = image[ymin:ymax, xmin:xmax]\n ivar = ivar[ymin:ymax, xmin:xmax]\n else:\n xmin, xmax, ymin, ymax = xyrange\n\n nx, ny = xmax-xmin, ymax-ymin\n npix = nx*ny\n \n nspec = specrange[1] - specrange[0]\n nwave = len(wavelengths)\n \n #- Solve AT W pix = (AT W A) flux\n \n #- Projection matrix and inverse covariance\n A = psf.projection_matrix(specrange, wavelengths, xyrange)\n\n #- Pixel weights matrix\n w = ivar.ravel()\n W = spdiags(ivar.ravel(), 0, npix, npix)\n\n #-----\n #- Extend A with an optional regularization term to limit ringing.\n #- If any flux bins don't contribute to these pixels,\n #- also use this term to constrain those flux bins to 0.\n \n #- Original: exclude flux bins with 0 pixels contributing\n # ibad = (A.sum(axis=0).A == 0)[0]\n \n #- Identify fluxes with very low weights of pixels contributing \n fluxweight = W.dot(A).sum(axis=0).A[0]\n minweight = 0.01*np.max(fluxweight)\n ibad = fluxweight < minweight\n \n #- Add regularization of low weight fluxes\n I = regularize*scipy.sparse.identity(nspec*nwave)\n I.data[0,ibad] = minweight - fluxweight[ibad]\n \n #- Only need to extend A if regularization is non-zero\n if np.any(I.data):\n pix = np.concatenate( (image.ravel(), np.zeros(nspec*nwave)) )\n Ax = scipy.sparse.vstack( (A, I) )\n wx = np.concatenate( (w, np.ones(nspec*nwave)) )\n else:\n pix = image.ravel()\n Ax = A\n wx = w\n\n #- Inverse covariance\n Wx = spdiags(wx, 0, len(wx), len(wx))\n iCov = Ax.T.dot(Wx.dot(Ax))\n \n #- Solve (image = A flux) weighted by Wx:\n #- A^T W image = (A^T W A) flux = iCov flux \n y = Ax.T.dot(Wx.dot(pix))\n \n xflux = spsolve(iCov, y).reshape((nspec, nwave))\n\n #- Solve for Resolution matrix\n try:\n R, fluxivar = resolution_from_icov(iCov)\n except np.linalg.linalg.LinAlgError, err:\n outfile = 'LinAlgError_{}-{}_{}-{}.fits'.format(specrange[0], specrange[1], waverange[0], waverange[1])\n print \"ERROR: Linear Algebra didn't converge\"\n print \"Dumping {} for debugging\".format(outfile)\n import fitsio\n fitsio.write(outfile, image, clobber=True)\n fitsio.write(outfile, ivar, extname='IVAR')\n fitsio.write(outfile, A.data, extname='ADATA') \n fitsio.write(outfile, A.indices, extname='AINDICES')\n fitsio.write(outfile, A.indptr, extname='AINDPTR')\n fitsio.write(outfile, iCov.toarray(), extname='ICOV')\n 
raise err\n \n #- Convolve with Resolution matrix to decorrelate errors\n fluxivar = fluxivar.reshape((nspec, nwave))\n rflux = R.dot(xflux.ravel()).reshape(xflux.shape)\n\n if full_output:\n results = dict(flux=rflux, ivar=fluxivar, R=R, xflux=xflux, A=A)\n results['iCov'] = iCov\n return results\n else:\n return rflux, fluxivar, R", "def scale_sky_spectrum(wlm, sky_spectrum, spectra, cut_sky=4., fmax=10, fmin=1, valid_wave_min=0, valid_wave_max=0, \n fibre_list=[100,200,300,400,500,600,700,800,900], plot=True, verbose=True, warnings=True): \n \n# # Read sky lines provided by 2dFdr\n# sky_line_,flux_sky_line_ = read_table(\"sky_lines_2dfdr.dat\", [\"f\", \"f\"] )\n# # Choose those lines in the range\n# sky_line=[]\n# flux_sky_line=[]\n# valid_wave_min = 6240\n# valid_wave_max = 7355\n# for i in range(len(sky_line_)):\n# if valid_wave_min < sky_line_[i] < valid_wave_max:\n# sky_line.append(sky_line_[i])\n# flux_sky_line.append(flux_sky_line_[i])\n \n \n if valid_wave_min == 0: valid_wave_min = wlm[0]\n if valid_wave_max == 0: valid_wave_max = wlm[-1]\n \n if verbose: print(\"\\n> Identifying sky lines using cut_sky =\",cut_sky,\", allowed SKY/OBJ values = [\",fmin,\",\",fmax,\"]\")\n if verbose: print(\" Using fibres = \",fibre_list)\n\n peaks,peaks_name,peaks_rest,continuum_limits=search_peaks(wlm,sky_spectrum, plot=plot, cut=cut_sky, fmax=fmax, only_id_lines=False, verbose=False) \n\n ratio_list=[]\n valid_peaks=[]\n \n if verbose: print(\"\\n Sky line Gaussian ratio Flux ratio\")\n n_sky_lines_found=0\n for i in range(len(peaks)):\n sky_spectrum_data=fluxes(wlm,sky_spectrum, peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n \n sky_median_continuum = np.nanmedian(sky_spectrum_data[11])\n \n object_spectrum_data_gauss=[]\n object_spectrum_data_integrated=[] \n median_list=[]\n for fibre in fibre_list: \n object_spectrum_flux=fluxes(wlm, spectra[fibre], peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n object_spectrum_data_gauss.append(object_spectrum_flux[3]) # Gaussian flux is 3\n object_spectrum_data_integrated.append(object_spectrum_flux[7]) # integrated flux is 7\n median_list.append(np.nanmedian(object_spectrum_flux[11]))\n object_spectrum_data=np.nanmedian(object_spectrum_data_gauss)\n object_spectrum_data_i=np.nanmedian(object_spectrum_data_integrated)\n \n object_median_continuum=np.nanmin(median_list) \n \n if fmin < object_spectrum_data/sky_spectrum_data[3] * sky_median_continuum/object_median_continuum < fmax :\n n_sky_lines_found = n_sky_lines_found + 1\n valid_peaks.append(peaks[i])\n ratio_list.append(object_spectrum_data/sky_spectrum_data[3])\n if verbose: print(\"{:3.0f} {:5.3f} {:2.3f} {:2.3f}\".format(n_sky_lines_found,peaks[i],object_spectrum_data/sky_spectrum_data[3], object_spectrum_data_i/sky_spectrum_data[7])) \n\n\n #print \"ratio_list =\", ratio_list\n #fit = np.polyfit(valid_peaks, ratio_list, 0) # This is the same that doing an average/mean\n #fit_line = fit[0]+0*wlm\n fit_line =np.nanmedian(ratio_list) # We just do a median\n #fit_line = fit[1]+fit[0]*wlm\n #fit_line = fit[2]+fit[1]*wlm+fit[0]*wlm**2\n #fit_line = fit[3]+fit[2]*wlm+fit[1]*wlm**2+fit[0]*wlm**3\n \n \n if plot:\n plt.plot(valid_peaks,ratio_list,\"+\")\n #plt.plot(wlm,fit_line)\n plt.axhline(y=fit_line, color='k', linestyle='--')\n plt.xlim(valid_wave_min-10, valid_wave_max+10) \n #if len(ratio_list) > 0:\n plt.ylim(np.nanmin(ratio_list)-0.2,np.nanmax(ratio_list)+0.2)\n plt.title(\"Scaling sky spectrum to object 
spectra\")\n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"OBJECT / SKY\")\n plt.minorticks_on()\n plt.show()\n plt.close()\n \n if verbose: print(\" Using this fit to scale sky spectrum to object, the median value is \",np.round(fit_line,3),\"...\") \n \n sky_corrected = sky_spectrum * fit_line\n\n# plt.plot(wlm,sky_spectrum, \"r\", alpha=0.3)\n# plt.plot(wlm,sky_corrected, \"g\", alpha=0.3)\n# plt.show()\n# plt.close()\n \n return sky_corrected, np.round(fit_line,3)", "def interpolate_spline(y, N):\n l = len(y)\n x = np.linspace(0, l, l)\n spline = interpolate.InterpolatedUnivariateSpline(x,y)\n xnew = np.linspace(0, l, N*l)\n ynew = spline(xnew)\n return ynew", "def forward(self, x):\n\n x, _ = equiangular_calculator(x, self.ratio)\n x = x.permute(0, 3, 1, 2)\n x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode=\"nearest\")\n x = reformat(x)\n return x", "def sRGBToSPD(rgb):\n global _LSSDATA\n rdata=_LSSDATA\n if rdata == None:\n rdata=_generateLSSData()\n _LSSDATA=rdata\n b11=rdata[0]\n b12=rdata[1]\n lin=linearFromsRGB3(rgb)\n rm=1e-5\n if lin[0]<=rm and lin[1]<=rm and lin[2]<=rm:\n return [rm for i in range(matShape(b12)[0])]\n # Implements Iterative Least Slope Squared algorithm\n linmat=matFromVec(lin)\n ret=matMul(b12,matT(linmat))\n shapelen=matShape(ret)[0]\n iters=0\n while True:\n iters+=1\n k1=[]\n k0=[]\n for r in range(shapelen):\n refl=matGet(ret,r,0)\n if refl>1:\n k1+=[[(1 if r==i else 0) for i in range(shapelen)]]\n if refl<=0:\n k0+=[[(1 if r==i else 0) for i in range(shapelen)]]\n k1len=len(k1)\n k0len=len(k0)\n if k1len+k0len==0:\n spdarray=[matGet(ret,i,0) for i in range(matShape(ret)[0])]\n break\n k1+=k0\n k=matNew(k1)\n cmat=[[1 if i<k1len else rm] for i in range(k0len+k1len)]\n cmat=matNew(cmat)\n tk=matT(k)\n ri=matI(matMul(matMul(k,b11),tk))\n rj=matSub(matMul(k,ret),cmat)\n rk=matMul(matMul(matMul(b11,tk),ri),rj)\n ret=matSub(ret,rk)\n for i in range(matShape(ret)[0]):\n s=matGet(ret,i,0)\n if s>1.0 and iters>20: matSet(ret,i,0,1.0) # Drastic measure to avoid overiteration\n if s<rm and iters>20: matSet(ret,i,0,rm)\n return SPD(spdarray,10,380,730)", "def plot_spectrum(inp='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', z=9.505, vel_width=100, bkg=None, scale_disp=1.3, nspline=27, show_cont=True, draws=100, figsize=(16, 8), ranges=[(3650, 4980)], Rline=1000, full_log=False, write=False, eazy_templates=None, use_full_dispersion=True, get_spl_templates=False, scale_uncertainty_kwargs=None, plot_unit=None, spline_single=True, sys_err=0.02, return_fit_results=False, use_aper_columns=False, label=None, **kwargs):\n global SCALE_UNCERTAINTY\n \n lw, lr = utils.get_line_wavelengths()\n \n if isinstance(inp, str):\n sampler = SpectrumSampler(inp, **kwargs)\n file = inp\n elif isinstance(inp, pyfits.HDUList):\n sampler = SpectrumSampler(inp, **kwargs)\n file = None\n else:\n file = None\n sampler = inp\n \n if (label is None) & (file is not None):\n label = os.path.basename(file)\n \n spec = sampler.spec\n \n if (use_aper_columns > 0) & ('aper_flux' in spec.colnames):\n if ('aper_corr' in spec.colnames) & (use_aper_columns > 1):\n ap_corr = spec['aper_corr']*1\n else:\n ap_corr = 1\n \n flam = spec['aper_flux']*spec['to_flam']*ap_corr\n eflam = spec['aper_full_err']*spec['to_flam']*ap_corr\n else:\n flam = spec['flux']*spec['to_flam']\n eflam = spec['full_err']*spec['to_flam']\n \n wrest = spec['wave']/(1+z)*1.e4\n wobs = spec['wave']\n mask = spec['valid']\n \n flam[~mask] = np.nan\n eflam[~mask] = np.nan\n \n bspl 
= sampler.bspline_array(nspline=nspline, get_matrix=True)\n\n # bspl = utils.bspline_templates(wave=spec['wave']*1.e4,\n # degree=3,\n # df=nspline)\n \n w0 = utils.log_zgrid([spec['wave'].min()*1.e4,\n spec['wave'].max()*1.e4], 1./Rline)\n \n templates, tline, _A = make_templates(sampler, z,\n bspl=bspl,\n eazy_templates=eazy_templates,\n vel_width=vel_width,\n scale_disp=scale_disp,\n use_full_dispersion=use_full_dispersion,\n disp=spec.disp,\n grating=spec.grating,\n **kwargs,\n )\n \n if scale_uncertainty_kwargs is not None:\n _, escl, _ = calc_uncertainty_scale(file=None,\n data=(spec, _A),\n **scale_uncertainty_kwargs)\n eflam *= escl\n spec['escale'] *= escl\n \n okt = _A[:,mask].sum(axis=1) > 0\n \n _Ax = _A[okt,:]/eflam\n _yx = flam/eflam\n \n if eazy_templates is None:\n _x = np.linalg.lstsq(_Ax[:,mask].T, \n _yx[mask], rcond=None)\n else:\n _x = nnls(_Ax[:,mask].T, _yx[mask])\n \n coeffs = np.zeros(_A.shape[0])\n coeffs[okt] = _x[0]\n \n _model = _A.T.dot(coeffs)\n _mline = _A.T.dot(coeffs*tline)\n _mcont = _model - _mline\n \n full_chi2 = ((flam - _model)**2/eflam**2)[mask].sum()\n cont_chi2 = ((flam - _mcont)**2/eflam**2)[mask].sum()\n \n if return_fit_results:\n return templates, coeffs, flam, eflam, _model, mask, full_chi2\n \n try:\n oktemp = okt & (coeffs != 0)\n \n AxT = (_A[oktemp,:]/eflam)[:,mask].T\n \n covar_i = utils.safe_invert(np.dot(AxT.T, AxT))\n covar = utils.fill_masked_covar(covar_i, oktemp)\n covard = np.sqrt(covar.diagonal())\n \n has_covar = True\n except:\n has_covar = False\n covard = coeffs*0.\n N = len(templates)\n covar = np.eye(N, N)\n \n print(f'\\n# line flux err\\n# flux x 10^-20 erg/s/cm2')\n if label is not None:\n print(f'# {label}')\n \n print(f'# z = {z:.5f}\\n# {time.ctime()}')\n \n cdict = {}\n eqwidth = {}\n \n for i, t in enumerate(templates):\n cdict[t] = [float(coeffs[i]), float(covard[i])]\n if t.startswith('line '):\n lk = t.split()[-1]\n \n # Equivalent width:\n # coeffs, line fluxes are in units of 1e-20 erg/s/cm2\n # _mcont, continuum model is in units of 1-e20 erg/s/cm2/A\n # so observed-frame equivalent width is roughly\n # eqwi = coeffs[i] / _mcont[ wave_obs[i] ]\n \n if lk in lw:\n lwi = lw[lk][0]*(1+z)/1.e4\n continuum_i = np.interp(lwi, spec['wave'], _mcont)\n eqwi = coeffs[i]/continuum_i\n else:\n eqwi = np.nan\n \n eqwidth[t] = eqwi\n \n print(f'{t:>20} {coeffs[i]:8.1f} ± {covard[i]:8.1f} (EW={eqwi:9.1f})')\n \n \n if 'srcra' not in spec.meta:\n spec.meta['srcra'] = 0.0\n spec.meta['srcdec'] = 0.0\n spec.meta['srcname'] = 'unknown'\n \n spec['model'] = _model/spec['to_flam']\n spec['mline'] = _mline/spec['to_flam']\n \n data = {'z': float(z),\n 'file':file,\n 'label':label,\n 'ra': float(spec.meta['srcra']),\n 'dec': float(spec.meta['srcdec']),\n 'name': str(spec.meta['srcname']),\n 'wmin':float(spec['wave'][mask].min()),\n 'wmax':float(spec['wave'][mask].max()),\n 'coeffs':cdict,\n 'covar':covar.tolist(),\n 'wave': [float(m) for m in spec['wave']],\n 'flux': [float(m) for m in spec['flux']],\n 'err': [float(m) for m in spec['err']],\n 'escale': [float(m) for m in spec['escale']],\n 'model': [float(m) for m in _model/spec['to_flam']],\n 'mline':[float(m) for m in _mline/spec['to_flam']],\n 'templates':templates, \n 'dof': int(mask.sum()), \n 'fullchi2': float(full_chi2), \n 'contchi2': float(cont_chi2),\n 'eqwidth': eqwidth,\n }\n \n for k in ['z','wmin','wmax','dof','fullchi2','contchi2']:\n spec.meta[k] = data[k]\n \n #fig, axes = plt.subplots(len(ranges)+1,1,figsize=figsize)\n if len(ranges) > 0:\n fig = 
plt.figure(figsize=figsize, constrained_layout=True)\n gs = GridSpec(2, len(ranges), figure=fig)\n axes = []\n for i, _ra in enumerate(ranges):\n axes.append(fig.add_subplot(gs[0,i]))\n \n axes.append(fig.add_subplot(gs[1,:]))\n \n else:\n fig, ax = plt.subplots(1,1,figsize=figsize)\n axes = [ax]\n \n _Acont = (_A.T*coeffs)[mask,:][:,:nspline]\n _Acont[_Acont < 0.001*_Acont.max()] = np.nan\n \n if (draws is not None) & has_covar:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n mu = np.random.multivariate_normal(coeffs[oktemp], covar_i, size=draws)\n \n #print('draws', draws, mu.shape, _A.shape)\n mdraws = _A[oktemp,:].T.dot(mu.T)\n else:\n mdraws = None\n \n if plot_unit is not None:\n unit_conv = (1*spec.meta['flamunit']).to(plot_unit,\n equivalencies=spec.equiv).value\n else:\n unit_conv = np.ones(len(wobs))\n \n for ax in axes:\n if 1:\n ax.errorbar(wobs, flam*unit_conv, eflam*unit_conv,\n marker='None', linestyle='None',\n alpha=0.5, color='k', ecolor='k', zorder=100)\n\n ax.step(wobs, flam*unit_conv, color='k', where='mid', lw=1, alpha=0.8)\n # ax.set_xlim(3500, 5100)\n\n #ax.plot(_[1]['templz']/(1+z), _[1]['templf'])\n \n ax.step(wobs[mask], (_mcont*unit_conv)[mask],\n color='pink', alpha=0.8, where='mid')\n ax.step(wobs[mask], (_model*unit_conv)[mask],\n color='r', alpha=0.8, where='mid')\n \n cc = utils.MPL_COLORS\n for w, c in zip([3727, 4980, 6565, 9070, 9530, 1.094e4, 1.282e4, \n 1.875e4], \n [cc['purple'], cc['b'], cc['g'], 'darkred', 'darkred', \n cc['pink'], cc['pink'], cc['pink']]):\n wz = w*(1+z)/1.e4\n dw = 70*(1+z)/1.e4\n ax.fill_between([wz-dw, wz+dw], [0,0], [100,100], \n color=c, alpha=0.07, zorder=-100)\n \n \n if mdraws is not None:\n ax.step(wobs[mask], (mdraws.T*unit_conv).T[mask,:],\n color='r', alpha=np.maximum(1./draws, 0.02), zorder=-100, where='mid')\n\n if show_cont:\n ax.plot(wobs[mask], (_Acont.T*unit_conv[mask]).T,\n color='olive', alpha=0.3)\n \n ax.fill_between(ax.get_xlim(), [-100, -100], [0, 0], color='0.8', \n alpha=0.5, zorder=-1)\n\n ax.fill_betweenx([0, 100], [0,0], [1215.67*(1+z)/1.e4]*2, \n color=utils.MPL_COLORS['orange'], alpha=0.2,\n zorder=-1)\n \n ax.grid()\n\n # axes[0].set_xlim(1000, 2500)\n # ym = 0.15; axes[0].set_ylim(-0.1*ym, ym)\n \n for i, r in enumerate(ranges):\n axes[i].set_xlim(*[ri*(1+z)/1.e4 for ri in r])\n # print('xxx', r)\n \n if spec.filter == 'clear':\n axes[-1].set_xlim(0.6, 5.29)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.1))\n axes[-1].xaxis.set_major_locator(MultipleLocator(0.5))\n elif spec.filter == 'f070lp':\n axes[-1].set_xlim(0.69, 1.31)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))\n elif spec.filter == 'f100lp':\n axes[-1].set_xlim(0.99, 1.91)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))\n axes[-1].xaxis.set_major_locator(MultipleLocator(0.1))\n elif spec.filter == 'f170lp':\n axes[-1].set_xlim(1.69, 3.21)\n elif spec.filter == 'f290lp':\n axes[-1].set_xlim(2.89, 5.31)\n else:\n axes[-1].set_xlim(wrest[mask].min(), wrest[mask].max())\n \n axes[-1].set_xlabel(f'obs wavelenth, z = {z:.5f}')\n \n #axes[0].set_title(os.path.basename(file))\n \n for ax in axes:\n xl = ax.get_xlim()\n ok = wobs > xl[0]\n ok &= wobs < xl[1]\n ok &= np.abs(wrest-5008) > 100\n ok &= np.abs(wrest-6564) > 100\n ok &= mask\n if ok.sum() == 0:\n ax.set_visible(False)\n continue\n \n ymax = np.maximum((_model*unit_conv)[ok].max(), 10*np.median((eflam*unit_conv)[ok]))\n \n ymin = np.minimum(-0.1*ymax, -3*np.median((eflam*unit_conv)[ok]))\n ax.set_ylim(ymin, ymax*1.3)\n # print(xl, 
ymax)\n \n if ok.sum() > 0:\n if (np.nanmax((flam/eflam)[ok]) > 20) & (full_log):\n ax.set_ylim(0.005*ymax, ymax*5)\n ax.semilogy()\n \n if len(axes) > 0:\n gs.tight_layout(fig, pad=0.8)\n else:\n fig.tight_layout(pad=0.8)\n \n if label is not None:\n fig.text(0.015*12./12, 0.005, f'{label}',\n ha='left', va='bottom',\n transform=fig.transFigure, fontsize=8)\n \n fig.text(1-0.015*12./12, 0.005, time.ctime(),\n ha='right', va='bottom',\n transform=fig.transFigure, fontsize=6)\n \n \n return fig, spec, data", "def perform_spectral_interpolation(gaussian_data):\n\n dframe = pd.DataFrame()\n wavelength1 = gaussian_data[:, -1]\n\n sampled_wavelength1 = np.arange(min(wavelength1), max(wavelength1), 2)\n wavelength2 = gaussian_data[:, -1]\n sampled_wavelength2 = np.arange(min(wavelength2), max(wavelength2), 2)\n a1_val = gaussian_data[:, 0]\n a2_val = gaussian_data[:, 1]\n sigma1 = gaussian_data[:, 2]\n sigma2 = gaussian_data[:, 3]\n\n # A1 first\n fit_params_a1 = interp1d(wavelength1, a1_val, kind='linear')\n fitted_val_a1 = fit_params_a1(sampled_wavelength1)\n # Now A2\n fit_params_a2 = interp1d(wavelength2, a2_val, kind='linear')\n fitted_val_a2 = fit_params_a2(sampled_wavelength2)\n\n # Now Sigma1\n fit_params_sigma1 = interp1d(wavelength1, sigma1, kind='linear')\n fitted_val_sigma1 = fit_params_sigma1(sampled_wavelength1)\n\n # Now Sigma2\n fit_params_sigma2 = interp1d(wavelength2, sigma2, kind='slinear')\n fitted_val_sigma2 = fit_params_sigma2(sampled_wavelength2)\n\n\n# plt.plot(wavelength1, Sigma1, 'bo')\n# plt.plot(sampled_wavelength1, fitted_val_Sigma1, 'ro--', markersize=3)\n# plt.grid(True, linestyle=':')\n# plt.show()\n dframe = pd.DataFrame({'W1' : sampled_wavelength1,\n 'W2' : sampled_wavelength2,\n 'A1' : fitted_val_a1,\n 'A2' : fitted_val_a2,\n 'Sigma1' : fitted_val_sigma1,\n 'Sigma2' : fitted_val_sigma2,\n })\n\n return dframe.round(3)", "def interpolate(self, distance, normalized=...): # -> BaseGeometry:\n ...", "def _get_new_sizes(value, value_range, size_range):\n slope = (size_range[1] - size_range[0]) / float(value_range[1]- value_range[0])\n return size_range[0] + slope * (value - value_range[0])", "def InterpolateArcLength(X, Y, L):\n\n # length of X\n K = len(X)\n # initialize iX, iY\n iX = np.zeros((0,))\n iY = np.zeros((0,))\n # generate spaced points\n Interval = np.linspace(0, 1, L)\n # get segment lengths\n Lengths = np.sqrt(\n np.power(np.diff(X), 2) + np.power(np.diff(Y), 2)\n )\n # check Lengths\n if Lengths.size:\n # normalize to unit length\n Lengths = Lengths / Lengths.sum()\n # calculate cumulative length along boundary\n Cumulative = np.hstack((0., np.cumsum(Lengths)))\n # place points in 'Interval' along boundary\n Locations = np.digitize(Interval, Cumulative)\n # clip to ends\n Locations[Locations < 1] = 1\n Locations[Locations >= K] = K - 1\n Locations = Locations - 1\n # linear interpolation\n Lie = np.divide(\n (Interval - [Cumulative[i] for i in Locations]),\n [Lengths[i] for i in Locations]\n )\n tX = np.array([X[i] for i in Locations])\n tY = np.array([Y[i] for i in Locations])\n iX = tX + np.multiply(\n np.array([X[i+1] for i in Locations]) - tX, Lie\n )\n iY = tY + np.multiply(\n np.array([Y[i+1] for i in Locations]) - tY, Lie\n )\n iXY = collections.namedtuple('iXY', ['iX', 'iY'])\n Output = iXY(iX, iY)\n\n return Output", "def fit_1d_solution(p, loc, ll, iteration=0):\n\n func_name = __NAME__ + '.fit_1d_solution()'\n # get 1d solution\n loc = fit_1d_ll_solution(p, loc, ll, iteration)\n # invert solution\n loc = invert_1ds_ll_solution(p, loc, ll, 
iteration)\n # get the total number of orders to fit\n num_orders = len(loc['ALL_LINES_{0}'.format(iteration)])\n # get the dimensions of the data\n ydim, xdim = loc['HCDATA'].shape\n # get inv_params\n inv_params = loc['LL_PARAM_{0}'.format(iteration)]\n # set pixel shift to zero, as doesn't apply here\n pixel_shift_inter = 0\n pixel_shift_slope = 0\n # get new line list\n ll_out = spirouMath.get_ll_from_coefficients(pixel_shift_inter,\n pixel_shift_slope,\n inv_params, xdim, num_orders)\n # get the first derivative of the line list\n dll_out = spirouMath.get_dll_from_coefficients(inv_params, xdim, num_orders)\n # find the central pixel value\n centpix = ll_out.shape[1]//2\n # get the mean pixel scale (in km/s/pixel) of the central pixel\n norm = dll_out[:, centpix]/ll_out[:, centpix]\n meanpixscale = speed_of_light * np.nansum(norm)/len(ll_out[:, centpix])\n # get the total number of lines used\n total_lines = int(np.nansum(loc['X_ITER_2'][:, 2]))\n # add to loc\n loc['LL_OUT_{0}'.format(iteration)] = ll_out\n loc.set_source('LL_OUT_{0}'.format(iteration), func_name)\n loc['DLL_OUT_{0}'.format(iteration)] = dll_out\n loc.set_source('DLL_OUT_{0}'.format(iteration), func_name)\n loc['TOTAL_LINES_{0}'.format(iteration)] = total_lines\n loc.set_source('TOTAL_LINES_{0}'.format(iteration), func_name)\n # log message\n wmsg = 'On fiber {0} mean pixel scale at center: {1:.4f} [km/s/pixel]'\n WLOG(p, 'info', wmsg.format(p['FIBER'], meanpixscale))\n # return loc\n return loc", "def forward(self, inp):\n outp = []\n x = inp\n if self.resize_input:\n x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)\n if self.normalize_input:\n x = 2 * x - 1\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n if idx == self.last_needed_block:\n break\n return outp", "def match_wl(wl, spec, ref_wl, method=\"scipy\", kind=\"linear\", bounds_error=False):\n starttime = time.time()\n if method == \"scipy\":\n print(kind + \" scipy interpolation\")\n linear_interp = interp1d(wl, spec, kind=kind, bounds_error=False)\n new_spec = linear_interp(ref_wl)\n elif method == \"numpy\":\n if kind.lower() is not \"linear\":\n print(\"Warning: Cannot do \" + kind + \" interpolation with numpy, switching to linear\" )\n print(\"Linear numpy interpolation\")\n new_spec = np.interp(ref_wl, wl, spec) # 1-d peicewise linear interpolat\n else:\n print(\"Method was given as \" + method)\n raise(\"Not correct interpolation method specified\")\n print(\"Interpolation Time = \" + str(time.time() - starttime) + \" seconds\")\n\n return new_spec # test inperpolations ", "def interpolate_blinks2(self, lin_interpolation_points = [[-150],[150]], coalesce_period=500):\r\n\r\n # self.pupil_diff = (np.diff(self.interpolated_pupil) - np.diff(self.interpolated_pupil).mean()) / np.diff(self.interpolated_pupil).std()\r\n # self.peaks = myfuncs.detect_peaks(self.pupil_diff, mph=10, mpd=500, threshold=None, edge='rising', kpsh=False, valley=False, show=False, ax=False)[:-1] # last peak might not reflect blink...\r\n # if self.peaks != None:\r\n # points_for_interpolation = np.array([self.peaks, self.peaks], dtype=int).T + np.array(lin_interpolation_points).T\r\n # for itp in points_for_interpolation:\r\n # self.interpolated_pupil[itp[0]:itp[-1]] = np.linspace(self.interpolated_pupil[itp[0]], self.interpolated_pupil[itp[-1]], itp[-1]-itp[0])\r\n # self.interpolated_x[itp[0]:itp[-1]] = np.linspace(self.interpolated_x[itp[0]], self.interpolated_x[itp[-1]], itp[-1]-itp[0])\r\n 
# self.interpolated_y[itp[0]:itp[-1]] = np.linspace(self.interpolated_y[itp[0]], self.interpolated_y[itp[-1]], itp[-1]-itp[0])\r\n\r\n self.interpolated_time_points = np.zeros(len(self.interpolated_pupil))\r\n self.pupil_diff = (np.diff(self.interpolated_pupil) - np.diff(self.interpolated_pupil).mean()) / np.diff(self.interpolated_pupil).std()\r\n peaks_down = detect_peaks(self.pupil_diff, mph=10, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False, show=False, ax=False)\r\n peaks_up = detect_peaks(self.pupil_diff*-1, mph=10, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False, show=False, ax=False)\r\n self.peaks = np.sort(np.concatenate((peaks_down, peaks_up)))\r\n\r\n if len(self.peaks) > 0:\r\n\r\n # prepare:\r\n self.peak_starts = np.sort(np.concatenate((self.peaks-1, self.blink_starts)))\r\n self.peak_ends = np.sort(np.concatenate((self.peaks+1, self.blink_ends)))\r\n start_indices = np.ones(self.peak_starts.shape[0], dtype=bool)\r\n end_indices = np.ones(self.peak_ends.shape[0], dtype=bool)\r\n for i in range(self.peak_starts.shape[0]):\r\n try:\r\n if self.peak_starts[i+1] - self.peak_ends[i] <= coalesce_period:\r\n start_indices[i+1] = False\r\n end_indices[i] = False\r\n except IndexError:\r\n pass\r\n self.peak_starts = self.peak_starts[start_indices]\r\n self.peak_ends = self.peak_ends[end_indices]\r\n\r\n # interpolate:\r\n points_for_interpolation = np.array([self.peak_starts, self.peak_ends], dtype=int).T + np.array(lin_interpolation_points).T\r\n for itp in points_for_interpolation:\r\n self.interpolated_pupil[itp[0]:itp[-1]] = np.linspace(self.interpolated_pupil[itp[0]], self.interpolated_pupil[itp[-1]], itp[-1]-itp[0])\r\n self.interpolated_x[itp[0]:itp[-1]] = np.linspace(self.interpolated_x[itp[0]], self.interpolated_x[itp[-1]], itp[-1]-itp[0])\r\n self.interpolated_y[itp[0]:itp[-1]] = np.linspace(self.interpolated_y[itp[0]], self.interpolated_y[itp[-1]], itp[-1]-itp[0])\r\n self.interpolated_time_points[itp[0]:itp[-1]] = 1", "def gripStretchQgsLinearObjectList(linearObjectList, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n linearObjectListToStretch = qad_utils.QadLinearObjectList(linearObjectList)\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n\n return linearObjectListToStretch", "def polyphase_resample(pro, L, M, fs, fir, axis, **kwargs):\n\n if M >= pro.shape[axis]:\n msg = 'Decimation factor must M={} 
be < pro.shape[{}] = {}'\n raise ValueError(msg.format(M, axis, pro.shape[axis]))\n\n # pathological case: pro has < 3 chunks -> autoreduce csize\n csize = pro.chunksize\n if csize > pro.shape[axis] // 3:\n csize = pro.shape[axis] // 3\n\n # kaiser antialiasing & interpolation filter coeffecients\n cutoff = fs / (2*max(L, M))\n fstop = kwargs.pop('fstop', cutoff + cutoff / 10)\n fpass = kwargs.pop('fpass', cutoff - cutoff / 10)\n gpass, gstop = kwargs.pop('gpass', 0.1), kwargs.pop('gstop', 40)\n h = fir(fpass, fstop, fs, gpass, gstop).coeffs\n\n # ensure decimation of each produced array is integer samples\n if csize % M > 0:\n csize = int(np.ceil(csize / M) * M)\n\n # iterators for prior, current and next chunks of data\n x = producer(pro, csize, axis)\n y = producer(pro, csize, axis)\n z = producer(pro, csize, axis)\n iprior, icurrent, inext = (iter(pro) for pro in [x,y,z])\n\n # num pts to append left & right on axis to cover convolve overhang\n # must be divisible by M to ensure int slicing after resample.\n overhang = int(np.ceil((len(h) - 1) / M) * M)\n\n # initialize left/right pads for first data section\n left_shape = list(pro.shape)\n left_shape[axis] = overhang\n left = np.zeros(left_shape)\n # advance inext twice to get right pads\n next(inext)\n right = slice_along_axis(next(inext), 0, overhang, axis=axis)\n\n # compute the first resampled chunk\n current = next(icurrent)\n padded = np.concatenate((left, current, right), axis=axis)\n resampled = sps.resample_poly(padded, up=L, down=M, axis=axis, window=h)\n\n # remove result points computed from pads\n a, b = int(overhang * L / M), -int(overhang * L / M)\n yield slice_along_axis(resampled, a, b, axis=axis)\n\n # resample remaining chunks\n cnt = z.shape[axis] // csize + bool(z.shape[axis] % csize) - 1\n for n, (last, curr, nxt) in enumerate(zip(iprior, icurrent, inext), 1):\n\n # build left and right pads for current\n left = slice_along_axis(last, -overhang, axis=axis)\n \n if n < cnt - 1:\n right = slice_along_axis(nxt, 0, overhang, axis=axis)\n else:\n # at cnt-1 chunks concantenate next to current\n curr = np.concatenate((curr, nxt), axis=axis) \n right = np.zeros(left.shape)\n\n padded = np.concatenate((left, curr, right), axis=axis)\n resampled = sps.resample_poly(padded, L, M, axis=axis, window=h)\n yield slice_along_axis(resampled, a, b, axis=axis)", "def lin_scale( val, x1, y1, x2, y2 ):\r\n x_range = (x2 - x1)\r\n new_val = 0\r\n if x_range is 0:\r\n new_val = y1\r\n else:\r\n y_range = ( y2 - y1 )\r\n new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1\r\n\r\n return new_val", "def downsample_lf(self, converted_lf):\n #For simplicity, we use every xth and yth pixel, instead of more complicated downsampling methods.\n DOWNSAMPLE_DIMS = (150, 150) # make output shape of each image less than these dimensions. 
\n step_size_x = int(converted_lf.shape[2]/DOWNSAMPLE_DIMS[0])+1\n step_size_y = int(converted_lf.shape[2]/DOWNSAMPLE_DIMS[0])+1\n \n return converted_lf[...,::step_size_x, ::step_size_y, :]", "def smooth(x,window_len=11,window='hanning'):\r\n\r\n if window_len<3:\r\n return x\r\n\r\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\r\n #print(len(s))\r\n if window == 'flat': #moving average\r\n w=np.ones(window_len,'d')\r\n else:\r\n w=eval('np.'+window+'(window_len)')\r\n\r\n y=np.convolve(w/w.sum(),s,mode='valid')\r\n return y[0:256]", "def run_experiment_with_various_length_scales_linear(bottom_bound, top_bound, side_length, mean, standard_deviation,\n pick_number, number_of_maps, step):\n not_cheating_data = []\n cheating_data = []\n for length_scale in range(bottom_bound, top_bound, step): # runs through each length scale\n points = create_points_with_spatially_correlated_pollution_2d(side_length, mean, standard_deviation,\n length_scale,\n number_of_maps) # Creates all points\n picked_points = pick_uniform_random_points_on_map_of_maps(points,\n pick_number) # Picks points to be measured\n interpolated_points = interpolate_unknown_points_of_a_map_of_maps_of_points(picked_points, points,\n # Interpolates using noncheating method\n RBF(np.random.randint(1e-05,\n 100 + 1)),\n fixed=False)\n\n not_cheating_data.append(\n average_rmse_of_maps(\n interpolated_points)) # adds average rms of all the trials for the noncheating method\n interpolated_points = interpolate_unknown_points_of_a_map_of_maps_of_points(picked_points, points,\n # Interpolates using cheating method\n RBF(length_scale,\n ), fixed=True)\n\n cheating_data.append(\n average_rmse_of_maps(\n interpolated_points)) # adds average rmse of all the trials for the cheating method\n # print(length_scale)\n\n plot_numbers(range(bottom_bound, top_bound, step), not_cheating_data, range(bottom_bound, top_bound, step),\n cheating_data, # Plots the data Red is not cheating, Green Cheating\n \"Length Scale\", \"RMSE\")", "def scale(input_img, size):\n width, height = size\n old_height, old_width = input_img.shape\n x_scale = float(height) / old_height\n y_scale = float(width) / old_width\n\n output_img = np.zeros((height, width), dtype=np.uint8)\n for xidx in xrange(height):\n old_x = float(xidx) / x_scale\n for yidx in xrange(width):\n old_y = float(yidx) / y_scale\n if old_x.is_integer() or old_y.is_integer():\n output_img[xidx, yidx] = input_img[int(old_x), int(old_y)]\n else: # use bilinear interpolation\n x1 = int(np.floor(old_x))\n x2 = int(np.ceil(old_x)) if int(np.ceil(old_x)) < old_height else old_height - 1\n y1 = int(np.floor(old_y))\n y2 = int(np.ceil(old_y)) if int(np.ceil(old_y)) < old_width else old_width - 1\n\n q11 = input_img[x1, y1]\n q12 = input_img[x1, y2]\n q21 = input_img[x2, y1]\n q22 = input_img[x2, y2]\n\n output_img[xidx, yidx] = (q11 * (x2 - old_x) * (y2 - old_y)\n + q21 * (old_x - x1) * (y2 - old_y)\n + q12 * (x2 - old_x) * (old_y - y1)\n + q22 * (old_x - x1) * (old_y - y1)) \\\n / ((x2 - x1) * (y2 - y1) + 1e-10)\n\n return output_img", "def splitting_intensity(self, **kwargs):\n \n if 'pol' not in kwargs:\n raise Exception('pol must be specified')\n \n copy = self.data.copy()\n copy.rotateto(kwargs['pol'])\n copy.x = np.gradient(copy.x)\n rdiff, trans = copy.chopdata()\n s = -2 * np.trapz(trans * rdiff) / np.trapz(rdiff**2)\n return s", "def forward(self, inp):\n outp = []\n x = inp\n\n if self.resize_input:\n x = F.interpolate(x, size=(299, 299), mode=\"bicubic\",)\n\n if self.normalize_input:\n x = 
denormalize(x)\n x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)\n if torch.cuda.is_available():\n x = x.to(\"cuda\")\n\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n\n if idx == self.last_needed_block:\n break\n\n return outp", "def trace_gradient(self,p,direction='upgradient',stepsize=1,well_snap_distance = 1):\r\n \r\n if not direction == 'upgradient' and not direction == 'downgradient':\r\n raise Exception(\"direction must be either 'upgradient' or 'downgradient'.\")\r\n \r\n import scipy.spatial\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n import shapely.geometry\r\n \r\n ring = np.column_stack((\r\n np.cos(np.linspace(0,2*np.pi,361)),\r\n np.sin(np.linspace(0,2*np.pi,361)) )) \r\n ring *= self.domain_radius\r\n ring += np.asarray([np.real(self.domain_center),np.imag(self.domain_center)])\r\n \r\n # First, find all elements which could be stoppers\r\n stoppers = []\r\n stoppers.append(shapely.geometry.LineString(ring))\r\n for e in self.elementlist:\r\n \r\n if isinstance(e, ElementHeadBoundary):\r\n # Head Boundaries are valid end points\r\n stoppers.append(shapely.geometry.LineString(e.line[:,:2]))\r\n \r\n if isinstance(e, ElementWell):\r\n # Wells are valid end points\r\n stoppers.append(shapely.geometry.Point(np.asarray([np.real(e.zc),np.imag(e.zc)])))\r\n \r\n if isinstance(e, ElementLineSink):\r\n # Line Sinks are valid end points\r\n stoppers.append(shapely.geometry.LineString(e.line[:,:2]))\r\n \r\n if isinstance(e, ElementNoFlowBoundary):\r\n # No-flow Boundaries are valid end points\r\n stoppers.append(shapely.geometry.LineString(e.line[:,:2]))\r\n \r\n def gradient(p1,p2,p3,z1,z2,z3):\r\n \r\n area = abs((p1[0]*(p2[1]-p3[1])+p2[0]*(p3[1]-p1[1])+p3[0]*(p1[1]-p2[1]))/2)\r\n \r\n M = np.asarray(\r\n [[p2[1]-p3[1], p3[1]-p1[1], p1[1]-p2[1]],\r\n [p3[0]-p2[0], p1[0]-p3[0], p2[0]-p1[0]]])\r\n \r\n U = np.asarray([z1,z2,z3]).reshape((3,1))\r\n \r\n # Solution based on http://pers.ge.imati.cnr.it/livesu/papers/MLP18/MLP18.pdf Equation 1\r\n return np.dot(M,U)[:,0]/(2*area)\r\n \r\n # Check if the start point is complex, if yes, turn it into a real vector\r\n if np.iscomplex(p).any():\r\n p = np.asarray([np.real(p),np.imag(p)])\r\n \r\n # Depending on the direction, add a gradient\r\n if direction == 'upgradient':\r\n stepsize = stepsize\r\n else:\r\n stepsize = -stepsize\r\n \r\n # Set the repeater boolean to True\r\n repeater = True\r\n \r\n \r\n \r\n # Re-arrange the starting point into an array\r\n points = np.asarray(p).copy().reshape((1,2))\r\n # \"\"\"\r\n # Get three points \r\n testpoints = np.asarray([\r\n points[-1,0] + 1j*points[-1,1],\r\n points[-1,0] + stepsize/100 + 1j*points[-1,1],\r\n points[-1,0] + 1j*points[-1,1] + 1j*stepsize/100])\r\n \r\n testpoints = np.real(self.evaluate(testpoints,mode='head'))\r\n \r\n grad = np.asarray([\r\n testpoints[1]-testpoints[0],\r\n testpoints[2]-testpoints[0]])/stepsize*100\r\n grad = grad/np.linalg.norm(grad)\r\n # \"\"\"\r\n \r\n # grad = self.evaluate(\r\n # z = points,\r\n # mode = 'gradient',\r\n # derivatives = 'phi')\r\n # # grad = np.asarray([np.real(grad), np.imag(grad)])\r\n # grad = grad/np.linalg.norm(grad)\r\n \r\n # And save the result to the points array\r\n points = np.row_stack((\r\n points.copy(),\r\n points + grad*stepsize))\r\n \r\n # Now start the while loop, trace until the end\r\n while repeater:\r\n \r\n # The last point in the array is the starting point\r\n p = points[-1,:]\r\n \r\n # \"\"\"\r\n testpoints = 
np.asarray([\r\n points[-1,0] + 1j*points[-1,1],\r\n points[-1,0] + stepsize/100 + 1j*points[-1,1],\r\n points[-1,0] + 1j*points[-1,1] + 1j*stepsize/100])\r\n \r\n testpoints = np.real(self.evaluate(testpoints,mode='head'))\r\n \r\n grad = np.asarray([\r\n testpoints[1]-testpoints[0],\r\n testpoints[2]-testpoints[0]])/stepsize*100\r\n \r\n grad = grad/np.linalg.norm(grad)\r\n # \"\"\"\r\n \r\n # grad = self.evaluate(\r\n # z = points[-1,:],\r\n # mode = 'gradient',\r\n # derivatives = 'phi')\r\n # # grad = np.asarray([np.real(grad), np.imag(grad)])\r\n # grad = grad/np.linalg.norm(grad)\r\n \r\n # And append the next step to the list\r\n points = np.row_stack((\r\n points,\r\n points[-1,:] + grad*stepsize))\r\n \r\n \r\n line = shapely.geometry.LineString(points[-2:,:])\r\n \r\n # Check for stopping elements\r\n for stop in stoppers:\r\n \r\n # If this stopper is a well, check for distance\r\n if stop.type == 'Point':\r\n point = shapely.geometry.Point(points[-1,:])\r\n if point.distance(stop) <= well_snap_distance:\r\n points[-1,:] = np.asarray(point.xy)[:,0]\r\n repeater = False\r\n \r\n # Else, we can check for intersection\r\n else:\r\n if line.intersects(stop):\r\n \r\n if line.intersection(stop).type == 'Point':\r\n \r\n points[-1,:] = np.asarray(line.intersection(stop).xy)[:,0]\r\n repeater = False\r\n \r\n else:\r\n \r\n print(type(line.intersection(stop)))\r\n print((type(line.intersection(stop)) == 'Point'))\r\n \r\n points[-1,:] = np.asarray(line.intersection(stop)[0].xy)[:,0]\r\n repeater = False\r\n\r\n# # Check for oscillation\r\n# p2p = points[-3,:]-points[-2,:]\r\n# p1p = points[-2,:]-points[-1,:]\r\n# if np.inner(p1p,p2p) < 0: \r\n# # The trace direction has change by more than 90 degrees, i.e.\r\n# # turned back; stop iterating\r\n# points = points[:-1,:]\r\n# repeater = False\r\n \r\n return points", "def rescale_list(input_list, size):\n assert len(input_list) >= size\n\n # Get the number to skip between iterations.\n skip = len(input_list) // size\n\n # Build our new output.\n output = [input_list[i] for i in range(0, len(input_list), skip)]\n\n # Cut off the last one if needed.\n return output[:size]", "def _incremental_steps(start, end, steps, stepsize=None):\n if stepsize is None: step_size = (end - start) / np.maximum((steps - 1), 1)\n gradient = []\n for i in range(steps):\n value = start + step_size * i\n gradient.append(value)\n\n return gradient[0:steps]", "def __init__(self, x, y, kind='linear', axis=-1,\n copy=True, bounds_error=True, fill_value=np.nan):\n _Interpolator1D.__init__(self, x, y, axis=axis)\n\n self.copy = copy\n self.bounds_error = bounds_error\n self.fill_value = fill_value\n\n if kind in ['zero', 'slinear', 'quadratic', 'cubic']:\n order = {'nearest': 0, 'zero': 0,'slinear': 1,\n 'quadratic': 2, 'cubic': 3}[kind]\n kind = 'spline'\n elif isinstance(kind, int):\n order = kind\n kind = 'spline'\n elif kind not in ('linear', 'nearest'):\n raise NotImplementedError(\"%s is unsupported: Use fitpack \"\n \"routines for other types.\" % kind)\n x = array(x, copy=self.copy)\n y = array(y, copy=self.copy)\n\n if x.ndim != 1:\n raise ValueError(\"the x array must have exactly one dimension.\")\n if y.ndim == 0:\n raise ValueError(\"the y array must have at least one dimension.\")\n\n # Force-cast y to a floating-point type, if it's not yet one\n if not issubclass(y.dtype.type, np.inexact):\n y = y.astype(np.float_)\n\n # Backward compatibility\n self.axis = axis % y.ndim\n\n # Interpolation goes internally along the first axis\n self.y = y\n y = 
self._reshape_yi(y)\n\n # Adjust to interpolation kind; store reference to *unbound*\n # interpolation methods, in order to avoid circular references to self\n # stored in the bound instance methods, and therefore delayed garbage\n # collection. See: http://docs.python.org/2/reference/datamodel.html\n if kind in ('linear', 'nearest'):\n # Make a \"view\" of the y array that is rotated to the interpolation\n # axis.\n minval = 2\n if kind == 'nearest':\n self.x_bds = (x[1:] + x[:-1]) / 2.0\n self._call = self.__class__._call_nearest\n else:\n self._call = self.__class__._call_linear\n else:\n minval = order + 1\n self._spline = splmake(x, y, order=order)\n self._call = self.__class__._call_spline\n\n if len(x) < minval:\n raise ValueError(\"x and y arrays must have at \"\n \"least %d entries\" % minval)\n\n self._kind = kind\n self.x = x\n self._y = y", "def interp(s, r, l=11, alpha=0.5):\n b = firwin(2*l*r+1, alpha/r);\n a = 1\n return r*lfilter(b, a, upsample(s, r))[r*l+1:-1]" ]
[ "0.59042335", "0.5510684", "0.5495997", "0.543274", "0.54308456", "0.5368978", "0.53469396", "0.5295205", "0.5252446", "0.5242712", "0.51369417", "0.5123754", "0.5104273", "0.5101531", "0.5047771", "0.50370836", "0.5025036", "0.50159943", "0.49947384", "0.49876678", "0.4978305", "0.49636522", "0.49388546", "0.49343756", "0.4929394", "0.49259147", "0.492023", "0.49149087", "0.49122736", "0.49077895", "0.48845872", "0.48705453", "0.48699415", "0.48698097", "0.4853402", "0.4848068", "0.48462936", "0.48383492", "0.48308977", "0.48295665", "0.48211327", "0.48176116", "0.48104668", "0.480353", "0.4802605", "0.4799733", "0.4798039", "0.4792269", "0.47910306", "0.47872218", "0.47848064", "0.47766963", "0.4766511", "0.47654337", "0.47580206", "0.47449785", "0.47386932", "0.4733569", "0.47315946", "0.47293353", "0.47168973", "0.47102442", "0.47082487", "0.47078875", "0.46990424", "0.46820343", "0.46801156", "0.4675627", "0.46618882", "0.46618345", "0.46569234", "0.46529496", "0.46509948", "0.46505356", "0.46436816", "0.46405768", "0.46345526", "0.46336818", "0.46316215", "0.46276867", "0.46262968", "0.46248317", "0.46236157", "0.4611125", "0.46082258", "0.46071953", "0.46047613", "0.4604176", "0.45946798", "0.45936278", "0.45898664", "0.45849854", "0.45748788", "0.4572146", "0.4571497", "0.45595995", "0.45573545", "0.4556639", "0.45565608", "0.45540592" ]
0.6795765
0
Calculate 1D rotationally averaged power spectra from the image stacks listed in a directory
def rops_dir(indir, output_dir = "1dpw2_dir"):
	from EMAN2 import periodogram
	import os
	flist = os.listdir(indir)
	print flist
	if os.path.exists(output_dir) is False: os.mkdir(output_dir)
	for i, v in enumerate(flist):
		(filename, filextension) = os.path.splitext(v)
		nima = EMUtil.get_image_count(os.path.join(indir,v))
		print nima
		for im in xrange(nima):
			e = EMData()
			file_name = os.path.join(indir,v)
			e.read_image(file_name, im)
			tmp1 = periodogram(e)
			tmp = tmp1.rotavg()
			if im == 0:
				sum_ima = model_blank(tmp.get_xsize())
				sum_ima += tmp
			else :
				sum_ima += tmp
		table = []
		nr = sum_ima.get_xsize()
		for ir in xrange(nr):  table.append([sum_ima.get_value_at(ir)])
		drop_spider_doc(os.path.join(output_dir, "1dpw2_"+filename+".txt"), table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spectra_stacker(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n data_shape = np.shape(image_data)\n ra_axis = data_shape[2]\n dec_axis = data_shape[1]\n wl_axis = data_shape[0]\n\n pxl_total = ra_axis * dec_axis\n \n data_unwrap = [] \n for i_ra in range(ra_axis):\n for i_dec in range(dec_axis):\n pixel_data = image_data[:][:,i_dec][:,i_ra]\n \n data_unwrap.append(pixel_data)\n\n data_stacked = np.zeros((pxl_total, wl_axis))\n for i_row in range(np.shape(data_unwrap)[0]):\n data_row = data_unwrap[i_row]\n for i_pixel in range(len(data_row)):\n data_stacked[i_row][i_pixel] = data_row[i_pixel]\n\n # writing data to a fits file\n hdr = fits.Header()\n hdr['CTYPE1'] = 'pixel'\n hdr['CRPIX1'] = 1\n hdr['CRVAL1'] = data_stacked[0][0]\n hdr['CDELT1'] = data_stacked[0][1] - data_stacked[0][0]\n\n primary_hdu = fits.PrimaryHDU(header=hdr)\n hdu = fits.ImageHDU(data_stacked)\n\n hdul = fits.HDUList([primary_hdu, hdu])\n\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n hdul.writeto(data_dir + '/stacked.fits')\n return data_unwrap", "def average( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n asum = radioastronomy.Spectrum()\n nsum = 0\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if nsum == 0:\n asum = copy.deepcopy( rs)\n firstlon = rs.gallon\n asum.ydataA = rs.ydataA * rs.durationSec\n asum.gallat = rs.gallat * rs.durationSec\n asum.gallon = rs.gallon * rs.durationSec\n nsum = 1\n firstutc = rs.utc\n lastutc = rs.utc\n else:\n asum.ydataA = asum.ydataA + (rs.ydataA * rs.durationSec)\n asum.count = asum.count + rs.count\n asum.durationSec = asum.durationSec + rs.durationSec\n # fix wrap of longitudes\n if abs(rs.gallon - firstlon) > 180:\n crossZero = True\n if rs.gallon > firstlon:\n rs.gallon = rs.gallon - 360.\n else:\n rs.gallon = rs.gallon + 360.\n asum.gallon = asum.gallon + (rs.gallon * rs.durationSec)\n asum.gallat = asum.gallat + (rs.gallat * rs.durationSec)\n # keep track of observing time for weighted sum\n lastutc = rs.utc\n nsum = nsum + 1\n #end for all files loop\n\n if nsum < 1:\n print \"No acceptable files in average list\"\n else:\n asum.ydataA = asum.ydataA/float(asum.durationSec)\n asum.gallon = asum.gallon/float(asum.durationSec)\n asum.gallat = asum.gallat/float(asum.durationSec)\n aveutc,duration = radioastronomy.aveutcs( firstutc, lastutc)\n asum.utc = aveutc\n if (duration < 1.):\n print 'hotcold.average: very short average interval: ',duration\n return nsum, asum", "def combine1(prefix):\n files = glob.glob(prefix + '/*.npz')\n files = [numpy.load(f) for f in files]\n edges = files[0]['edges']\n r = []\n m = []\n e = []\n r = files[0]['xi'][0, 0]\n l = []\n for i in range(len(edges) - 1):\n xi = [f['xi'][i, 1, :] for f in files]\n l.append(\n (r, \n numpy.mean(xi, axis=0),\n numpy.std(xi, axis=0) * len(xi) ** -0.5))\n return numpy.array(l)", "def analysis(file_name, sky_file_name):\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.rcParams['text.latex.preamble'] = [r'\\boldmath']\n\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n data_dir = 'cube_results/' + stk_f_n\n \n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n 
spectra_stacker(file_name)\n\n # one figure to rule them all\n main_fig = plt.figure(1)\n\n # calling data once will be enough\n im_coll_data = image_collapser(file_name)\n spectra_data = spectrum_creator(file_name)\n sr = wavelength_solution(file_name) \n gs_data = spectra_analysis(file_name, sky_file_name)\n \n def graph_indiv():\n cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])\n cbs_y = gs_data['gd_shifted'] \n\n # plotting spectra to check\n fig, ax3 = plt.subplots()\n ax3.plot(cbd_x, cbs_y, linewidth=0.5, color=\"#000000\")\n ax3.tick_params(labelsize=20)\n ax3.set_xlabel(r'\\textbf{Wavelength (\\AA)}', fontsize=20)\n ax3.set_ylabel(r'\\textbf{Flux}', fontsize=20)\n fig.savefig(data_dir + \"/\" + stk_f_n + '_single_spectra.pdf', \n bbox_inches=\"tight\")\n\n # --- for collapsed images ---\n def graphs_collapsed(): \n f, (ax1, ax2) = plt.subplots(1, 2)\n \n ax1.imshow(im_coll_data['median'], cmap='gray_r') \n ax1.set_title(r'\\textbf{galaxy: median}', fontsize=13) \n ax1.set_xlabel(r'\\textbf{Pixels}', fontsize=13)\n ax1.set_ylabel(r'\\textbf{Pixels}', fontsize=13) \n\n ax2.imshow(im_coll_data['sum'], cmap='gray_r')\n ax2.set_title(r'\\textbf{galaxy: sum}', fontsize=13) \n ax2.set_xlabel(r'\\textbf{Pixels}', fontsize=13)\n ax2.set_ylabel(r'\\textbf{Pixels}', fontsize=13)\n \n f.subplots_adjust(wspace=0.4)\n f.savefig(data_dir + \"/\" + stk_f_n + '_collapsed_images.pdf')\n\n \n snw_data = sky_noise_weighting(file_name, sky_file_name)\n df_data = otwo_doublet_fitting(file_name, sky_file_name)\n\n # --- spectra ---\n def graphs_spectra():\n f, (ax1, ax2) = plt.subplots(2, 1) \n\n # --- redshifted data plotting\n cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])\n \n ## plotting our cube data\n cbs_y = gs_data['gd_shifted'] \n ax1.plot(cbd_x, cbs_y, linewidth=0.5, color=\"#000000\")\n\n ## plotting our sky noise data\n snd_y = snw_data['sky_regions'][:,1]\n ax1.plot(cbd_x, snd_y, linewidth=0.5, color=\"#f44336\", alpha=0.5)\n\n # plotting spectra to check\n fig, ax3 = plt.subplots()\n ax3.plot(cbd_x, cbs_y, linewidth=0.5, color=\"#000000\")\n ax3.tick_params(labelsize=20)\n ax3.set_xlabel(r'\\textbf{Wavelength (\\AA)}', fontsize=20)\n ax3.set_ylabel(r'\\textbf{Flux}', fontsize=20)\n fig.savefig(data_dir + \"/\" + stk_f_n + '_single_spectra.pdf', \n bbox_inches=\"tight\")\n\n ## plotting our [OII] region\n ot_x = df_data['x_region']\n ot_y = df_data['y_region']\n ax1.plot(ot_x, ot_y, linewidth=0.5, color=\"#00c853\") \n\n ## plotting the standard deviation region in the [OII] section\n std_x = df_data['std_x']\n std_y = df_data['std_y']\n ax1.plot(std_x, std_y, linewidth=0.5, color=\"#00acc1\") \n \n ## plotting peak lines for scipy finder and peakutils finder\n #pk_lines = gs_data['gd_peaks']\n #for i in range(len(pk_lines)):\n #srb = sr['begin']\n #ax1.axvline(x=srb+pk_lines[i], linewidth=0.5, color=\"#8bc34a\", alpha=0.2)\n \n pu_lines = gs_data['pu_peaks']\n for i in range(len(pu_lines)):\n srb = sr['begin']\n ax1.axvline(x=(pu_lines[i]), linewidth=0.5, color=\"#ec407a\", alpha=0.2)\n\n ax1.set_title(r'\\textbf{spectra: cross-section redshifted}', fontsize=13) \n ax1.set_xlabel(r'\\textbf{Wavelength (\\AA)}', fontsize=13)\n ax1.set_ylabel(r'\\textbf{Flux}', fontsize=13)\n ax1.set_ylim([-1000,5000]) # setting manual limits for now\n \n # --- corrected redshift\n crs_x = np.linspace(sr['begin'], sr['end'], sr['steps'])\n rdst = gs_data['redshift']\n\n sp_lines = gs_data['spectra']\n\n ## corrected wavelengths\n corr_x = crs_x / (1+rdst)\n\n ## plotting our cube data\n 
cps_y = gs_data['gd_shifted']\n ax2.plot(corr_x, cps_y, linewidth=0.5, color=\"#000000\")\n\n ## plotting our sky noise data\n sn_y = gs_data['sky_noise']\n ax2.plot(corr_x, sn_y, linewidth=0.5, color=\"#e53935\")\n \n ## plotting spectra lines\n for e_key, e_val in sp_lines['emis'].items():\n spec_line = float(e_val)\n ax2.axvline(x=spec_line, linewidth=0.5, color=\"#00c853\")\n ax2.text(spec_line-10, 4800, e_key, rotation=-90)\n \n ax2.set_title(r'\\textbf{spectra: cross-section corrected}', fontsize=13) \n ax2.set_xlabel(r'\\textbf{Wavelength (\\AA)}', fontsize=13)\n ax2.set_ylabel(r'\\textbf{Flux}', fontsize=13)\n ax2.set_ylim([-500,5000]) # setting manual limits for now\n\n f.subplots_adjust(hspace=0.5)\n f.savefig(data_dir + \"/\" + stk_f_n + '_spectra.pdf')\n\n # saving our plotting into npy files so they can be used elsewhere\n np.save(data_dir + \"/\" + stk_f_n + \"_cbd_x\", cbd_x)\n np.save(data_dir + \"/\" + stk_f_n + \"_cbs_y\", cbs_y) \n\n np.save(data_dir + \"/\" + stk_f_n + \"_snd_y\", snd_y) \n \n np.save(data_dir + \"/\" + stk_f_n + \"_corr_x\", corr_x)\n np.save(data_dir + \"/\" + stk_f_n + \"_cps_y\", cps_y)\n \n def graphs_otwo_region():\n ot_fig = plt.figure(6)\n\n # plotting the data for the cutout [OII] region\n ot_x = df_data['x_region']\n ot_y = df_data['y_region']\n plt.plot(ot_x, ot_y, linewidth=1.5, color=\"#000000\")\n\n ## plotting the standard deviation region in the [OII] section\n std_x = df_data['std_x']\n std_y = df_data['std_y']\n #plt.plot(std_x, std_y, linewidth=1.5, color=\"#00acc1\") \n\n dblt_rng = df_data['doublet_range']\n ot_x_b, ot_x_e = dblt_rng[0], dblt_rng[-1]\n x_ax_vals = np.linspace(ot_x_b, ot_x_e, 1000)\n\n # lmfit \n lm_init = df_data['lm_init_fit']\n lm_best = df_data['lm_best_fit'] \n\n plt.plot(ot_x, lm_best, linewidth=1.5, color=\"#1e88e5\", \n label=r\"\\textbf{Best fit}\")\n plt.plot(ot_x, lm_init, linewidth=1.5, color=\"#43a047\", alpha=0.5,\n label=r\"\\textbf{Initial guess}\")\n \n lm_params = df_data['lm_best_param']\n lm_params = [prm_value for prm_key, prm_value in lm_params.items()]\n c, i_val1, i_val2, sig_g, rdsh, sig_i = lm_params\n\n dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths for OII\n l1 = dblt_mu[0] * (1+rdsh)\n l2 = dblt_mu[1] * (1+rdsh)\n \n sig = np.sqrt(sig_g**2 + sig_i**2) \n norm = (sig*np.sqrt(2*np.pi))\n\n lm_y1 = c + ( i_val1 / norm ) * np.exp(-(ot_x-l1)**2/(2*sig**2))\n lm_y2 = c + ( i_val2 / norm ) * np.exp(-(ot_x-l2)**2/(2*sig**2))\n \n plt.plot(ot_x, lm_y1, linewidth=1.5, color=\"#e64a19\", alpha=0.7, \n label=r\"\\textbf{Gaussian 1}\") \n plt.plot(ot_x, lm_y2, linewidth=1.5, color=\"#1a237e\", alpha=0.7,\n label=r\"\\textbf{Gaussian 2}\")\n\n # plotting signal-to-noise straight line and gaussian to verify it works\n sn_line = df_data['sn_line']\n sn_gauss = df_data['sn_gauss']\n\n #plt.axhline(y=sn_line, linewidth=0.5, color=\"#5c6bc0\", alpha=0.7) \n #plt.plot(ot_x, sn_gauss, linewidth=0.5, color=\"#5c6bc0\", alpha=0.7)\n\n #plt.title(r'\\textbf{[OII] region}', fontsize=13)\n plt.legend(loc='upper left', prop={'size': 15})\n plt.tick_params(labelsize=20)\n plt.xlabel(r'\\textbf{Wavelength (\\AA)}', fontsize=20)\n plt.ylabel(r'\\textbf{Flux}', fontsize=20)\n plt.xlim([l1-100,np.max(ot_x)])\n plt.ylim([-100,np.max(ot_y)+100]) # setting manual limits for now\n plt.savefig(data_dir + \"/\" + stk_f_n + '_otwo_region.pdf',bbox_inches=\"tight\")\n\n graph_indiv()\n graphs_collapsed()\n graphs_spectra()\n graphs_otwo_region()\n\n plt.close(\"all\")\n\n return {'image_data': 
im_coll_data, 'spectra_data': spectra_data, 'sr': sr,\n 'df_data': df_data, 'gs_data': gs_data, 'snw_data': snw_data}", "def createAverageImages(self):\n for grabber in self.grabbers:\n callsign = grabber[\"ID\"]\n callMatch = \"%s/%s*\" % (self.downloadFolder, callsign)\n fnameOut = \"%s/%s.%s.jpg\" % (self.averagesFolder, callsign, self.timeCode())\n cmd = \"convert %s -evaluate-sequence Mean %s\" %(callMatch, fnameOut)\n print(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def average_ps(self):\n\n self.powerspectrum=np.average(self.powerspectra, axis=0)", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = 
float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def main():\n# pixel_to_wavelen_dir = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\\n# Spectral_Band_pass\\Pixel_to_wavelen_map'\n\n file_path = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass\\\n All_FWHM_only_Gaussian'\n radiance_file = read_radiance_data()\n file_path_2 = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass\\\n All_FWHM\\spectral_bandpass_1400'\n\n #start with Gaussian Bandpass\n# data_names = [each for each in os.listdir(file_path)\n# if each.startswith(\"Params_Gauss\")]\n#\n#\n# sample_data = []\n# for data_files in data_names[9:]:\n# #print(data_files)\n#\n# wavelen_suffix = data_files.split('_')[-1]\n#\n# pixel_to_wvl_map_data = sorted([each for each in os.listdir(pixel_to_wavelen_dir)\n# if each.endswith(wavelen_suffix)])\n#\n# gaussian_files = os.path.join(file_path, data_files)\n#\n# dframe = pd.read_csv(gaussian_files)\n# #dframe = dframe[['A1', 'A2', 'Sigma1', 'Sigma2']]\n# dframe = dframe[['A1', 'Sigma1']] # for Gaussian only\n# pixel_to_wav_map = os.path.join(pixel_to_wavelen_dir, pixel_to_wvl_map_data[0])\n# dframe1 = pd.read_csv(pixel_to_wav_map)\n# dframe['CW'] = dframe1['CW']\n# dframe = dframe.iloc[1400]\n# sample_data.append(dframe.values)\n # for flat top Gaussian\n# #gaussian_values = perform_spectral_interpolation(np.array(sample_data))\n\n# gaussian_values = perform_spectral_interpolation_only_gaussian(np.array(sample_data))\n#\n##\n## # Let us now create a spectral bandpass\n# #create_spectral_bandpass(gaussian_values, radiance_file, file_path) # flat top Gaussian\n# create_spectral_bandpass_only_gaussian(gaussian_values, radiance_file, file_path)\n#\n#\n## #Make sure that the center wavelength of Gaussians are the same\n## sample_val = []\n## data_names_interpol = sorted([each for each in os.listdir(file_path_2)\n## if each.endswith('csv')])\n## interpol_wavelen = []\n## interpol_rad = [ ]\n##\n## for i in range(0, 64):\n## sub_sample_wvl = []\n## sub_sample_rad = []\n##\n## for files in data_names_interpol[9:]:\n##\n## interpol_rsr = os.path.join(file_path_2, files)\n## dframe = pd.read_csv(interpol_rsr, usecols=[\"wavelength\", \"rad\"])\n##\n## wavelength = dframe['wavelength'][i]\n## rad = dframe['rad'][i]\n## sub_sample_wvl.append(wavelength)\n## sub_sample_rad.append(rad)\n## dframe = perform_point_interpolation(sub_sample_wvl, sub_sample_rad,\n #np.array(sample_data)[:,-1])\n## interpol_rad.append(dframe['rad'].values)\n## interpol_wavelen.append(dframe['wavelength'].values)\n## create_spectral_bandpass_interpol(np.array(interpol_wavelen),\n #np.array(interpol_rad),\n #np.array(sample_data)[:,-1], file_path_2)\n# cc\n##\n#\n##\n###\n## # let us now perfrom spectral convolution with high res. 
radiance data\n calculate_in_band_irradiance(file_path, file_path_2, radiance_file)", "def averageImage(dir): \n total_image = np.zeros(shape=(496, 512))\n for subdir, dirs, files in os.walk(dir):\n # print(subdir, files)\n # try:\n \n for file in files:\n img_path = os.path.join(subdir, file)\n img = np.asarray(Image.open(img_path))\n \n delta_height = img.shape[0] - total_image.shape[0]\n delta_width = img.shape[1] - total_image.shape[1]\n \n \n delta_left = delta_width//2 if not delta_width%2 else (delta_width//2+1)\n delta_right = delta_width//2\n delta_top = delta_height//2 if not delta_height%2 else (delta_height//2+1)\n delta_bottom = delta_height//2\n \n # print(img.shape, delta_top, delta_bottom, delta_left, delta_right)\n total_image = np.add(total_image, img[delta_top: (img.shape[0]-delta_bottom), delta_left:(img.shape[1]-delta_right)])\n # except:\n # total_image = np.zeros(shape=(496, 768))\n # for file in files:\n # img_path = os.path.join(subdir, file)\n # total_image = np.add(total_image, np.asarray(Image.open(img_path)))\n \n # if total_image.shape == (496, 768):\n # total_image = total_image[:, 128:(768-127)]\n return total_image/len(files)", "def compute_average(imlist):\n # open first image and make into array of type float\n averageim = np.array(Image.open(imlist[0]), 'f')\n for imname in imlist[1:]:\n try:\n averageim += np.array(Image.open(imname))\n except:\n print(imname + '...skipped')\n averageim /= len(imlist)\n # return average as uint8\n return np.array(averageim, 'uint8')", "def read_mastcam_dir(self, filepath, suffix, unit, feature, extension = '.IMG', lblext='.LBL_label', eye='LR', margin=6):\n \n if eye == 'L':\n eyez = 'ML'\n elif eye == 'R':\n eyez = 'MR'\n elif eye == 'LR':\n eyez = ''\n pass\n else:\n raise ValueError('Eye name %s is not valid! Use L, R, or LR.' 
% eye)\n \n # GET ALL FILES WITH GIVEN EXTENSION IN FILEPATH\n files = sorted(glob.glob(str(filepath) + \"*\" + eyez + \"*\" + str(suffix) + \"*\" + str(extension)))\n fileprefixes = sorted(list(set([f.split('/')[-1][0:12] for f in files])))\n print(fileprefixes)\n \n print(\"found %d files among %d sequences with eye %s and extension %s in %s:\" % (len(files), len(fileprefixes), eye, extension, filepath))\n assert len(files) > 0\n \n numfiles = len(fileprefixes)\n seen = 0\n percent = 0.0\n printed = [False for foo in range(1000)]\n \n fullimages = {}\n segmentation = {}\n\n data = []\n self.labels = []\n \n for fileprefix in fileprefixes:\n print(\" \" + fileprefix)\n \n thissequence = sorted(glob.glob(str(filepath) + fileprefix + \"*\" + str(suffix) + \"*\" + str(extension)))\n asdfghjkl = 0\n \n parser = Parser()\n seqfiltstr = \"\"\n dimlist = []\n for w in thissequence:\n labels = parser.parse(open_pds(w.replace(extension, lblext))) \n filt = labels['INSTRUMENT_STATE_PARMS']['FILTER_NAME'][9]\n seqfiltstr += filt\n h = int(labels['IMAGE']['LINES'])\n w = int(labels['IMAGE']['LINE_SAMPLES'])\n dimlist.append([h, w])\n #print(\" %s %s %s\" % (filt, h, w))\n\n print(\"Filter name:\", labels['INSTRUMENT_STATE_PARMS']['FILTER_NAME'])\n \n #print(seqfiltstr)\n # print(dimlist)\n seqstocombine = []\n \n # Handle cases which appear to be several series of observations\n if len(seqfiltstr) % 7 == 0:\n for i in range(len(seqfiltstr) // 7):\n subseq = thissequence[7*i:7*i+7]\n subseqfilt = seqfiltstr[7*i:7*i+7]\n if subseqfilt == '0123456':\n cont = False\n for j in range(7*i, 7*i+7):\n if dimlist[7*i] != dimlist[j]:\n print(\"SIZE ERROR\")\n cont = True\n if cont:\n continue\n \n seqstocombine.append(subseq)\n \n else:\n if seqfiltstr == '00112233445566':\n seq1 = [thissequence[2*i] for i in range(len(thissequence) // 2)]\n seq2 = [thissequence[2*i+1] for i in range(len(thissequence) // 2)]\n \n seqstocombine.append(seq1)\n seqstocombine.append(seq2)\n \n break\n else:\n print(\"Length multiple of 7 but bad sequence\")\n\n # Non-7 number of observations\n else:\n for i in range(len(seqfiltstr)):\n subseq = thissequence[i:i+7]\n subseqfilt = seqfiltstr[i:i+7]\n if subseqfilt == '0123456':\n cont = False\n for j in range(i, i+7):\n if dimlist[i] != dimlist[j]:\n print(\"SIZE ERROR\")\n cont = True\n if cont: continue\n \n seqstocombine.append(subseq)\n \n # No actual multispectral images exist, so use all RGB (sol 388)\n if len(seqstocombine) == 0 and 'sol388' in self.archive:\n seqstocombine = [[f] for f in thissequence]\n \n # Now, download each sequence with this prefix\n for subseq in seqstocombine:\n qwertyuiop = 0\n bigimage = None\n \n err = False\n # Get each image within sequence\n for filename in subseq:\n namestem = filename.split('.')[0].split('/')[-1]\n\n try:\n (image, lbls) = self.load_image(namestem, filepath, ext=extension, lblext=lblext)\n except ValueError as e:\n #print(\"An error happened while processing %s\" % filename)\n err = True\n break\n\n (h, w, b) = image.shape\n \n if b == 3:\n self.rgbdict[fileprefix + str(asdfghjkl)] = namestem\n fullimages[fileprefix + str(asdfghjkl)] = image\n #print(\"Stored %s to rgbdict\" % (fileprefix + str(asdfghjkl)))\n \n if bigimage == None and 'sol388' not in filepath:\n bigimage = np.zeros([h, w, 9], dtype='uint8')\n elif bigimage == None:\n bigimage = np.zeros([h, w, b], dtype='uint8')\n \n bigimage[:,:,qwertyuiop:qwertyuiop+b] = image\n\n qwertyuiop += b\n \n\n # Reorder images based on camera so filters are ordered\n if eye in 
['L', 'R']:\n bi = np.zeros([h, w, 9], dtype='uint8')\n if eye == 'L':\n bi[:, :, 0] = bigimage[:, :, 0]\n bi[:, :, 1] = bigimage[:, :, 1]\n bi[:, :, 2] = bigimage[:, :, 2]\n bi[:, :, 3] = bigimage[:, :, 4]\n bi[:, :, 4] = bigimage[:, :, 3]\n bi[:, :, 5] = bigimage[:, :, 6]\n bi[:, :, 6] = bigimage[:, :, 5]\n bi[:, :, 7] = bigimage[:, :, 7]\n bi[:, :, 8] = bigimage[:, :, 8]\n elif eye == 'R':\n bi[:, :, 0] = bigimage[:, :, 2]\n bi[:, :, 1] = bigimage[:, :, 1]\n bi[:, :, 2] = bigimage[:, :, 0]\n bi[:, :, 3] = bigimage[:, :, 4]\n bi[:, :, 4] = bigimage[:, :, 3]\n bi[:, :, 5] = bigimage[:, :, 5]\n bi[:, :, 6] = bigimage[:, :, 6]\n bi[:, :, 7] = bigimage[:, :, 7]\n bi[:, :, 8] = bigimage[:, :, 8]\n bigimage = bi\n\n if err:\n print(\" ...didn't load sequence. There was an error.\")\n continue\n \n print(\" ...loaded one sequence:\", (fileprefix + str(asdfghjkl)))\n \n if 'sol388' not in self.archive:\n name = fileprefix + str(asdfghjkl) + '_' + unit + '_' + feature\n else:\n name = namestem + '_' + unit + '_' + feature\n\n \n (segments, segmentlabels) = self.segment_image(bigimage, unit=unit)\n segmentation[fileprefix + str(asdfghjkl)] = segments[0][1]\n\n for i in range(len(segments)):\n data += [[float(x) for x in self.process_image(segments[i], name + segmentlabels[i], feature=feature)]]\n \n asdfghjkl += 1\n \n ###########################################\n \n seen += 1\n \n # output read-in progress\n if percent < 100:\n if (round((seen / float(numfiles)) * 100, 1) >= percent) and (printed[int(percent * 10)] == False):\n #print(\"...%3.1f%%...\" % percent)\n printed[int(percent * 10)] == True\n percent = round(((seen / float(numfiles)) * 100), 1) + 1\n print(\"...100%...\")\n print(\"Transposing data...\")\n data = np.array(data).T\n self.xvals.sort()\n \n # Output the pickle\n print(\"Writing pickle to \" + self.archive + \" ...\")\n outf = open(self.archive, 'w')\n pickle.dump((data, fullimages, segmentation, self.labels, self.xlabel, self.ylabel, self.xvals, self.rgbdict, self.lblext, self.initdata, self.initfilename), outf)\n outf.close()\n print(\"Wrote pickle to \" + self.archive)", "def spectrum_creator(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n segmentation_data = file_data[2]\n\n collapsed_data = image_collapser(file_name)\n\n # spectrum for central pixel\n cp_bright = []\n for key, data in collapsed_data.items():\n lgst_val = data.argmax()\n lgst_loc = unravel_index(data.argmax(), data.shape)\n cp_bright.append(lgst_loc)\n\n cp_loc = 0\n if ( cp_bright[0] == cp_bright[1] ):\n cp_loc = cp_bright[0]\n else: \n cp_loc = cp_bright[1]\n\n cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]\n\n # spectrum as defined by the segmentation area\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = [int(x) for x in re.findall('\\d+', stk_f_n)][0]\n\n # locating where the galaxy pixels are from the cube_id\n seg_curr_cube = np.where(segmentation_data == cube_id)\n scc_rows, scc_cols = seg_curr_cube\n\n #np.set_printoptions(threshold=np.nan)\n #print(segmentation_data)\n\n collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])\n for i_r in range(len(scc_rows)):\n # I want to pull out each pixel and store it into the collapsed spectrum array\n collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]\n \n galaxy_spectrum = np.zeros(np.shape(image_data)[0])\n for i_ax in range(len(galaxy_spectrum)):\n galaxy_spectrum[i_ax] = 
np.nansum(collapsed_spectrum[i_ax])\n \n return {'central': cp_spec_data, 'galaxy': galaxy_spectrum, \n 'segmentation': segmentation_data}", "def compute_average( imlist): \n\t# open first image and make into array of type float \n\taverageim = array( Image.open( imlist[ 0]), 'f') \n\tfor imname in imlist[ 1:]: \t\t\n\t\taverageim = averageim+array( Image.open( imname))\n\taverageim = averageim/len( imlist) \n\t# return average as uint8 \n\t#return array( averageim, 'uint8')\n\treturn averageim", "def sigma_clipping(path):\n\n #Sets the path to the specified directory\n os.chdir(path)\n #Makes directory for each iteration \n os.system('mkdir 1st_interation_sigma_clip 2nd_interation_sigma_clip 3rd_interation_sigma_clip')\n\n #Reading in files and setting the local arrays\n list_of_files=sorted(glob.glob('*flt.fits'))\n file_number=[]\n file_date=[]\n file_mean1=[]\n file_mean2=[]\n #Masking the ext 1 & 4 with ext 3 & 6 then calcualting the staistics \n for im in list_of_files:\n h=fits.open(im)\n name=h[0].header['rootname']\n date=h[0].header['date-obs']\n filter_name=h[0].header['filter']\n sci_chip1=h[4].data\n sci_chip2=h[1].data\n dq_chip1=h[6].data\n dq_chip2=h[3].data\n sci_chip1[dq_chip1 !=0]=np.nan\n sci_chip2[dq_chip2 !=0]=np.nan\n file_date=np.append(file_date,date)\n file_mean1=np.append(file_mean1,np.nanmean(sci_chip1))\n file_mean2=np.append(file_mean2,np.nanmean(sci_chip2))\n \n file_date = [pd.to_datetime(d,format='%Y-%m-%d') for d in file_date]\n Mean_c1=np.mean(file_mean1)\n STD_c1 =np.std(file_mean1)\n \n Mean_c2=np.mean(file_mean2)\n STD_c2 =np.std(file_mean2)\n \n upper_sigma_c1=Mean_c1 + 3.0*STD_c1\n lower_sigma_c1=Mean_c1 - 3.0*STD_c1\n \n upper_sigma_c2=Mean_c2 + 3.0*STD_c2\n lower_sigma_c2=Mean_c2 - 3.0*STD_c2\n \n #Plotting the Observations Date vs. Average value for UVIS chip1\n plt.scatter(file_date,file_mean1)\n plt.xlabel('Date-Obs')\n plt.ylabel('Mean Values')\n plt.title('Bias Chip1 Statistics\\n Mean Value: {} Std.Dev Values: {}'.format(\"%.3f\" %Mean_c1, \"%.3f\" %STD_c1))\n plt.xticks(rotation=30)\n plt.axhline(y=upper_sigma_c1, xmin=-100,xmax=100,linewidth=2, color='red')\n plt.axhline(y=lower_sigma_c1, xmin=-100,xmax=100,linewidth=2, color='red')\n plt.axhline(y=Mean_c1, xmin=-100,xmax=100,linewidth=1, color='blue')\n plt.savefig('Statistics chip1 data plot.png')\n# plt.show()\n plt.clf()\n #Plotting the Observations Date vs. 
Average value for UVIS chip1\n plt.scatter(file_date,file_mean2)\n plt.xlabel('Date-Obs')\n plt.ylabel('Mean Values')\n plt.title('Bias Chip2 Statistics\\n Mean Value: {} Std.Dev Values: {}'.format(\"%.3f\" %Mean_c2, \"%.3f\" %STD_c2))\n plt.xticks(rotation=30)\n plt.axhline(y=upper_sigma_c2, xmin=-100,xmax=100,linewidth=2, color='red')\n plt.axhline(y=lower_sigma_c2, xmin=-100,xmax=100,linewidth=2, color='red')\n plt.axhline(y=Mean_c2, xmin=-100,xmax=100,linewidth=1, color='blue')\n plt.savefig('Statistics chip2 data plot.png')\n plt.xlabel('Date-Obs')\n plt.ylabel('Mean Values')\n # plt.show()\n plt.clf() \n\n #Remove the sigma cliiped images from directory to new directory\n list_of_files= glob.glob('*flt.fits')\n for im in list_of_files:\n h = fits.open(im)\n sci_chip1=h[4].data\n sci_chip2=h[1].data\n dq_chip1=h[6].data\n dq_chip2=h[3].data\n sci_chip1[dq_chip1 !=0]=np.nan\n sci_chip2[dq_chip2 !=0]=np.nan\n# print(h[0].header['Rootname'],' ','chip1',' ',np.nanmax(sci_chip1),' ',np.nanmin(sci_chip1),' ', np.nanmean(sci_chip1),' ',np.nanstd(sci_chip1),' ',np.nanmedian(sci_chip1))\n if np.nanmean(sci_chip1) >= upper_sigma_c1 or np.nanmean(sci_chip1) <= lower_sigma_c1:\n print(h[0].header['Rootname'],' ','chip1',' ',np.nanmax(sci_chip1),' ',np.nanmin(sci_chip1),' ', np.nanmean(sci_chip1),' ',np.nanstd(sci_chip1),' ',np.nanmedian(sci_chip1))\n os.system('mv {}_flt.fits {}'.format(h[0].header['Rootname'],Folder))\n if np.nanmean(sci_chip2) >= upper_sigma_c2 or np.nanmean(sci_chip2) <= lower_sigma_c2: \n print(h[0].header['Rootname'],' ','chip2',' ',np.nanmax(sci_chip2),' ',np.nanmin(sci_chip2),' ', np.nanmean(sci_chip2),' ',np.nanstd(sci_chip2),' ',np.nanmedian(sci_chip2))\n os.system('mv {}_flt.fits {}'.format(h[0].header['Rootname'],Folder))\n h.close()", "def read_snap(folder,FirstFile,LastFile,\n props,template,RedshiftsToRead,RedshiftList): \n nTrees = 0\n nHalos = 0 \n nTreeHalos = np.array([],dtype=np.int32)\n \n filter_list = []\n for prop in props:\n if props[prop]:\n filter_list.append((prop,template[prop]))\n filter_dtype = np.dtype(filter_list) \n gals = np.array([],dtype=filter_dtype) \n \n SnapshotList=np.array([],dtype=np.int32)\n \n for iredshift in range(0,len(RedshiftList)-1):\n if RedshiftsToRead[iredshift]: \n print (\"\\n\\nReading redshift: \", RedshiftList[iredshift], \"\\n\")\n for ifile in range(FirstFile,LastFile+1):\n char_redshift=\"%0.2f\" % RedshiftList[iredshift]\n filename = folder+'/'+'SA_z'+char_redshift+\"_\"+\"%d\"%(ifile)\n f = open(filename,\"rb\")\n \n this_nTrees = np.fromfile(f,np.int32,1)\n nTrees += this_nTrees\n this_nHalos = np.fromfile(f,np.int32,1)\n nHalos += this_nHalos\n print (\"File \", ifile,\" nGals = \",this_nHalos) \n \n addednTreeHalos = np.fromfile(f,np.int32,this_nTrees)\n nTreeHalos = np.append(nTreeHalos,addednTreeHalos)\n this_addedGalaxy = np.fromfile(f,template,this_nHalos) # all properties\n addedGalaxy = np.zeros(this_nHalos,dtype=filter_dtype) # selected props\n \n for prop in template.names:\n if props[prop]:\n addedGalaxy[prop] = this_addedGalaxy[prop]\n gals = np.append(gals,addedGalaxy) \n f.close() \n #endfor\n #endif \n SnapshotList=np.append(SnapshotList,gals['SnapNum'][len(gals)-1])\n #endfor\n \n return (gals, SnapshotList)", "def compute_average(image_list: list) -> array:\n\n # open first image and make into array of type float\n average_image = array(Image.open(image_list[0]), 'f')\n\n for image_file in image_list[1:]:\n try:\n average_image += array(Image.open(image_file))\n except:\n print(\"{0}...skipped\", 
image_file)\n average_image /= len(image_list)\n\n # return average as uint8\n return array(average_image, 'uint8')", "def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile", "def whiskerStat_multiext(filename,sigma,noise=False,mag=None,exptime=None):\n hdu=pf.open(filename)\n data = []\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n Mcc=np.zeros(Nobj)\n Mrr = np.zeros(Nobj)\n Mrc = np.zeros(Nobj)\n r50 = np.zeros(Nobj)\n for i in range(Nobj):\n print i\n imgo = hdui.data[i][4:].reshape(160,160)\n psf = rebin(imgo,(40,40))\n if noise == True:\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n img = (psf * objectphoton + skyphoton)*gain\n img = img + add_imageNoise(img) - bkg\n else:\n img = psf\n Mcc[i],Mrr[i],Mrc[i]=complex2ndMoments(img,sigma)\n r50[i] = mfwhm(img)[5]\n data.append([np.mean(Mcc),np.mean(Mrr),np.mean(Mrc),np.mean(r50)])\n data = np.array(data)\n datamean =np.array([robust_mean(data[:,0]),robust_mean(data[:,1]),robust_mean(data[:,2]),robust_mean(data[:,3])])\n #r50 = 0.5*2.35482*np.sqrt((datamean[0]+datamean[1])/2.)*0.27\n r50moffat = datamean[3]*0.27\n whk = ((datamean[0]-datamean[1])**2 + (2.*datamean[2])**2)**(0.25)*0.27\n phi = np.rad2deg(0.5*np.arctan2(2.*datamean[2],(datamean[0]-datamean[1])))\n datasubmean = data - datamean\n whkrms = (robust_mean((datasubmean[:,0] - datasubmean[:,1])**2 + 4.*datasubmean[:,2]**2))**(0.25)*0.27\n np.savetxt(filename[0:-6]+'txt',[r50moffat,whk,phi,whkrms,datamean[0],datamean[1],datamean[2]],fmt='%10.5f')\n return '---done !-----'", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n 
np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = 
np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def flatcombine(dir='Flats/*/dark_subtracted/'):\n\n for d in glob(dir):\n\n directory = \"/\".join(d.split('/')[0:2]) + '/swarped'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n keys = ['OBJECT', 'CAMNAME', 'FWINAME', 'ITIME', 'OBSDATE', 'FLSPECTR', 'HISTORY']\n images = ImageFileCollection(d, keywords=keys, glob_include='d*.fits')\n\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='on', camera='narrow', \n done='Dark Subtracted', output='cKNarrowLampOnH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='off', camera='narrow', \n done='Dark Subtracted', output='cKNarrowLampOffH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='on', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOnKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='off', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOffKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnKs', type='PIXEL')\n swarpfilter(d, 
dir, directory, images, keys, filter='Ks', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='on', camera='narrow', done='Dark Subtracted',\n output='cNarrowLampOnJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='off', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOffJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffJ', type='PIXEL')", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)", "def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]", "def norm_and_stack(images):\n imagestack = np.dstack(tuple([cv2.imread(image, cv2.IMREAD_UNCHANGED) for image in images]))\n mean = np.mean(imagestack)\n std = np.std(imagestack)\n new_im = (imagestack - mean)/std \n \n return new_im, mean, std", "def OFlow(testdirs, opath):\n\n\t# create directories if doesn't exist\n\tif not os.path.exists(opath):\n\t\tos.makedirs(opath)\n\n\tif not os.path.exists(opath+'/each_image'):\n\t\tos.makedirs(opath+'/each_image')\n\t\t\n\tif not os.path.exists(opath+'/masks'):\n\t\tos.makedirs(opath+'/masks')\n\t\t\n\tif not os.path.exists(opath+'/scaled_masks'):\n\t\tos.makedirs(opath+'/scaled_masks')\n\n\tdcount = 0\t# directory count\n\n\tfor d in testdirs:\n\n\t\th = d[-65:-1]\t# hash\n\n\t\t# create directory for each hash to save individual optical flow images separately\n\t\tif not os.path.exists(opath+'/each_image/'+h):\n\t\t\tos.makedirs(opath+'/each_image/'+h)\n\n\t\tprvs = cv2.imread(d + 'frame0000.png', 0)\t# previous 
image\n\n\t\ts = (prvs.shape[0], prvs.shape[1], 3)\t# hsv image shape\n\n\t\thsv = np.zeros(s, np.uint8)\n\t\thsv[...,1] = 255\n\t\t\n\t\tms = (prvs.shape[0], prvs.shape[1])\t\t# mask shape\n\t\t\n\t\tmask = np.zeros(ms, np.uint8)\n\t\tsum_mask = np.zeros(ms, np.uint8)\n\t\tscaled_mask = np.zeros(ms, np.uint8)\n\t\t\n\t\tprint(\"dir: \",dcount,\" dim: \",sum_mask.shape)\n\t\tdcount += 1\n\t\t\n\t\tflag = 0\n\n\t\tfor i in range(1,100):\n\t\t\tnxt = cv2.imread(d + 'frame00'+str(i).zfill(2)+'.png', 0)\t# next image\n\t\t\tflow = cv2.calcOpticalFlowFarneback(prvs,nxt, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n\t\t\tmag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\t# calculates magnitude and angles of 2D vectors\n\t\t\t\n\t\t\thsv[...,0] = ang*180/np.pi/2\n\t\t\t\n\t\t\thsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)\n\t\t\tbgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)\n\t\t\tomg = cv2.cvtColor(bgr,cv2.COLOR_BGR2GRAY)\n\t#\t\tprint(omg.dtype)\n\t#\t\tprint(omg.shape)\n\t\t\t\n\t\t\tcv2.imshow('frame1',omg)\n\t\t\tom = Image.fromarray(omg)\n\t\t\tom.save(opath+'/each_image/'+h+'/frame00'+str(i).zfill(2)+'.png', 0)\n\t\t\t\n\t\t\tomg2 = omg\n\n\t\t\t# scaling with random hardcoded values\n\t\t\tfor r in range(omg2.shape[0]):\n\t\t\t\tfor c in range(omg2.shape[1]):\n\t\t\t\t\tif omg2[r][c] < 32:\n\t\t\t\t\t\tsum_mask[r][c] += 0\n\t\t\t\t\telif omg2[r][c] < 128:\n\t\t\t\t\t\tsum_mask[r][c] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tsum_mask[r][c] += 2\n\n\t\t\tflag = cv2.waitKey(30) & 0xff\n\t\t\tif flag == 27:\t\t# press ESC to exit\n\t\t\t break\n\n\t\t\tprvs = nxt\n\n\t\t# generating mask based on sum_mask again based on random hardcoded values\n\t\tfor r in range(prvs.shape[0]):\n\t\t\tfor c in range(prvs.shape[1]):\n\t\t\t\tif sum_mask[r][c] > 50:\n\t\t\t\t\tmask[r][c] = 2\n\t\t\t\t\tscaled_mask[r][c] = 255\n\t\t\t\telif sum_mask[r][c] > 15:\n\t\t\t\t\tmask[r][c] = 1\n\t\t\t\t\tscaled_mask[r][c] = 128\n\t\tomask = Image.fromarray(mask)\n\t\tomask.save(opath+'/masks/'+h+'.png', 0)\n\t\tosmask = Image.fromarray(scaled_mask)\n\t\tosmask.save(opath+'/scaled_masks/'+h+'.png', 0)\n\n\tcv2.destroyAllWindows()", "def modspec(self, elem):\r\n image = elem.reshape(self.stimshape)\r\n fourier = np.fft.rfft2(image)\r\n mid = int(fourier.shape[0]/2)\r\n power = np.abs(fourier)**2\r\n avgmag = np.array([(power[ii] + power[-ii])/2 for ii in range(mid)])\r\n return avgmag", "def plotPower(array):\r\n ny, nx = array.shape\r\n nyc = ny/2 - 1\r\n nxc = nx/2 - 1\r\n array2, r, theta = reproject_image_into_polar(array, [nyc,nxc])\r\n # Let's average over theta\r\n array3 = np.zeros(ny)\r\n for i in range(ny):\r\n array3[i] = np.sum(array2[i,:])/np.float(nx)\r\n \r\n return array3", "def spectrum2D(path):\n image = cv2.imread(path,0)\n fftOriginal = np.fft.fft2(image)\n shiftedFFT = np.fft.fftshift(fftOriginal)\n shiftedFFTMagnitude = np.abs(shiftedFFT)\n ## AVERAGE RADIAL\n rows = image.shape[0]\n cols = image.shape[1]\n midRow = rows/2+1\n midCol = cols/2+1\n maxRadius = math.ceil(np.sqrt((midRow+2)**2 + (midCol+2)**2))\n radialProfile = np.zeros((maxRadius, 1))\n count = np.zeros((maxRadius, 1))\n for i in range(cols):\n for j in range(rows):\n radius = np.sqrt((j-midRow)**2 + (i-midCol)**2)\n thisIndex = math.ceil(radius) + 1\n radialProfile[thisIndex] = radialProfile[thisIndex] + shiftedFFTMagnitude[j,i]\n count[thisIndex] = count[thisIndex] + 1\n radialProfile_new = radialProfile / count\n for k in range(radialProfile_new.shape[0]):\n if math.isnan(radialProfile_new[k]):\n radialProfile_new[k] = 0.\n return 
radialProfile_new", "def masterPGFlat(flat_list, master_dark_fname, normalize = 'median', local_sig_bad_pix = 3, \\\n global_sig_bad_pix = 9, local_box_size = 11, hotp_map_fname = None, verbose=False,\n output_dir = None, zeroth_order_flat_fname = None, zeroth_transmission_factor = 0.00016, offsets = [4,-1],\n normal_flat_fname = None, plot = False):\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n\n if verbose:\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open all files into a 3D array\n foo = np.empty((dark_shape[0],dark_shape[1],len(flat_list)))\n\n #Open first flat file to check exposure time\n first_flat_hdu = f.open(flat_list[0])\n flat_exp_time = first_flat_hdu[0].header['EXPTIME']\n filter_name = first_flat_hdu[0].header['AFT']\n\n #Open the zeroth order\n zeroth_order_flat = f.open(zeroth_order_flat_fname)[0].data\n\n\n if verbose:\n print(\"Subtracting zeroth order frame {} using transmission factor {} and offsets [{},{}]\".format(zeroth_order_flat_fname, zeroth_transmission_factor, offsets[0],offsets[1]))\n\n\n if dark_exp_time != flat_exp_time:\n print(\"The master dark file doesn't have the same exposure time as the flats. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = flat_exp_time/dark_exp_time\n print(factor)\n else:\n factor = 1.\n\n #scale the zeroth order image to the same exposure time\n zeroth_exp_factor = flat_exp_time/float(f.getheader(zeroth_order_flat_fname)['EXPTIME'])\n print(zeroth_exp_factor)\n zeroth_order_flat = zeroth_exp_factor*zeroth_order_flat #scale it to the same exposure time of PG flats\n\n #We've already read it, so we'll stick it in foo\n\n print(\"Combining flat files\")\n for i in range(0,len(flat_list)):\n #subtract dark for each file, then normalize by mode\n hdu = f.open(flat_list[i])\n d_sub = hdu[0].data - factor*master_dark\n\n #cleaned_d_sub = d_sub - ndimage.shift(zeroth_transmission_factor*zeroth_order_flat,offsets, order = 0) #full pixel shift\n\n #normalize\n if normalize == 'mode':\n d_sub = d_sub/mode(d_sub, axis = None, nan_policy = 'omit')\n elif normalize == 'median':\n d_sub = d_sub/np.nanmedian(d_sub)\n foo[:,:,i] = d_sub\n\n #Median combine frames\n uncleaned_flat = np.median(foo, axis = 2)\n\n #For PG_flat, subtract zeroth order flat\n\n flat = uncleaned_flat - shift(zeroth_transmission_factor*zeroth_order_flat,offsets, order = 0)\n\n ###Now, deal with bad pixel.\n\n #Filter bad pixels\n #bad_px = sigma_clip(flat, sigma = sig_bad_pix) #old and bad\n ###Major update here: do sigma clipping on the pix-to-pix flat with the large scale vignette removed\n ###Also add local sigma clipping\n def stddevFilter(img, box_size):\n \"\"\" from\n https://stackoverflow.com/questions/28931265/calculating-variance-of-an-image-python-efficiently/36266187#36266187\n This function compute the standard deviation of an image in a\n moving box of a given size. 
The pixel i,j of the output is the\n standard deviation of the pixel value in the box_size x box_size box\n around the i,j pixel in the original image.\n \"\"\"\n wmean, wsqrmean = (cv2.boxFilter(x, -1, (box_size, box_size), \\\n borderType=cv2.BORDER_REFLECT) for x in (img, img*img))\n return np.sqrt(wsqrmean - wmean*wmean)\n\n #median flat\n median_flat = median_filter(flat, local_box_size) #arbitrary size, shouldn't matter as long as it's big enough\n #standard deviation image\n stddev_im = stddevFilter(flat, local_box_size)\n\n #Local clipping\n local_bad_pix = np.abs(median_flat - flat) > local_sig_bad_pix*stddev_im\n\n #Global clipping here to reject awful pixels and dust, bad columns, etc\n pix_to_pix = flat/median_flat\n global_bad_px = sigma_clip(pix_to_pix, sigma = global_sig_bad_pix).mask #9 seems to work best\n\n #also set all 0 and negative pixels in flat as bad\n non_positive = flat <= 0\n\n #logic combine\n bad_px = np.logical_or(global_bad_px, local_bad_pix)\n\n #also add non_positive pixels\n bad_px = np.logical_or(bad_px, non_positive)\n\n #Normalize good pixel values\n if normalize == 'median':\n norm_flat = flat/np.nanmedian(flat[~bad_px])\n elif normalize == 'mode':\n norm_flat = flat/mode(flat, axis = None, nan_policy = 'omit')\n #Stick it back in the last hdu\n hdu[0].data = norm_flat\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created master flat by median combining the following:\"\n for i in range(len(flat_list)):\n hdu[0].header['HISTORY'] = flat_list[i]\n if normalize == 'median':\n hdu[0].header['HISTORY'] = \"Normalized to the median of the master flat\"\n elif normalize == 'mode':\n hdu[0].header['HISTORY'] = \"Normalized to the mode of the master flat\"\n hdu[0].header['HISTORY'] = \"Performed bad pixel local and global sigma clipping with {}, {}sigmas\".format(local_sig_bad_pix, global_sig_bad_pix)\n hdu[0].header['HISTORY'] = \"Zeroth order removed by {}, with factor {}, and offsets [{},{}]\".format(zeroth_order_flat_fname, zeroth_transmission_factor, offsets[0], offsets[1])\n hdu[0].header['HISTORY'] = \"############################\"\n\n if plot:\n\t#Different limits for H and J\n print(filter_name)\n if filter_name == 'H__(1.64)':\n vmin, vmax = 1.1, 1.3\n else:\n vmin, vmax = 1.1, 1.17\n print(vmin, vmax, ' are the limits')\n if normal_flat_fname == None:\n normal_flat_fname = \"/scr/data/calibrations/median_flat_J.fits\"\n print(\"using archival normal flat\")\n normal_flat = f.open(normal_flat_fname)[0].data\n fig, ax = plt.subplots(2,2,figsize = (20,20))\n ax0 = ax[0,0].imshow(uncleaned_flat/np.nanmedian(uncleaned_flat[~bad_px])/normal_flat, origin = 'lower', vmin = vmin, vmax = vmax)\n ax1 = ax[0,1].imshow(norm_flat/normal_flat, origin = 'lower', vmin =vmin, vmax = vmax)\n ax2 = ax[1,0].imshow(uncleaned_flat/np.nanmedian(uncleaned_flat[~bad_px])/normal_flat, origin = 'lower', vmin = vmin, vmax = vmax)\n ax3 = ax[1,1].imshow(norm_flat/normal_flat, origin = 'lower', vmin =vmin, vmax = vmax)\n ax[0,0].set_xlim([400,1600])\n ax[0,0].set_ylim([400,1600])\n ax[0,1].set_xlim([400,1600])\n ax[0,1].set_ylim([400,1600])\n ax[1,0].set_xlim([400,1000])\n ax[1,0].set_ylim([1100,1600])\n ax[1,1].set_xlim([400,1000])\n ax[1,1].set_ylim([1100,1600])\n ax[0,0].set_title('PG flat')\n ax[0,1].set_title('Zeroth order subtracted')\n plt.colorbar(ax0, ax = ax[0,0])\n 
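# --- Illustrative aside (not part of the flat-building snippet above): a minimal,
# self-contained sketch of the local bad-pixel test used here, assuming only that we
# flag pixels deviating from a local median by more than n_sigma moving-box standard
# deviations; the demo array and box size are hypothetical stand-ins for the real flat.
import numpy as np
import cv2
from scipy.ndimage import median_filter

def local_sigma_clip(img, box_size=11, n_sigma=3.0):
    """Boolean mask of pixels that deviate from their local median by more than
    n_sigma local (moving-box) standard deviations."""
    img = img.astype(np.float64)
    # moving-box mean and mean-of-squares via cv2.boxFilter, as in stddevFilter above
    wmean, wsqrmean = (cv2.boxFilter(x, -1, (box_size, box_size),
                                     borderType=cv2.BORDER_REFLECT)
                       for x in (img, img * img))
    local_std = np.sqrt(np.clip(wsqrmean - wmean * wmean, 0, None))
    local_med = median_filter(img, box_size)
    return np.abs(local_med - img) > n_sigma * local_std

flat_demo = np.random.normal(1.0, 0.01, (64, 64))
flat_demo[10, 20] = 5.0                      # inject an obvious hot pixel
print(local_sigma_clip(flat_demo).sum())     # a handful of pixels, including the hot one
# --- end of aside ---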
plt.colorbar(ax1, ax = ax[0,1])\n plt.show()\n\n #Parse the last fileanme\n if output_dir is not None:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_PG_flat.fits\"\n flat_outname = flat_outname.rsplit('/',1)[-1]\n flat_outname = output_dir+flat_outname\n else:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_PG_flat.fits\"\n\n #Write the fits file\n if verbose:\n print((\"Writing master flat to {}\".format(flat_outname)))\n hdu.writeto(flat_outname, overwrite=True)\n\n #If there's already a hot pixel map then we'll add to it.\n if hotp_map_fname != None:\n #read in the existing bp map\n #hdu = f.open(hotp_map_fname)\n #hdu[0].data += np.array(bad_px.mask, dtype=float)\n #hdu[0].data = np.logical_or(hdu[0].data.astype(bool), bad_px) #use logical or to combine bad pixel maps\n #bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n print(\"Will deal with hot pixel map from dark frames in the calibrate function\")\n\n #else:\n #Parse the last fileanme\n if output_dir is not None:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n bp_outname = bp_outname.rsplit('/',1)[-1]\n bp_outname = output_dir+bp_outname\n else:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n ##### Now write the bad pixel map\n hdu[0].data = bad_px.astype(int)#np.array(bad_px.mask, dtype=float)\n #Parse the last fileanme\n # bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n #Add history keywords\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created bad pixel map by sigma clipping on pixel-to-pixel flat{}\".format(flat_outname)\n hdu[0].header['HISTORY'] = \"Bad pixel cutoffs: local sigma = {} and global sigma = {} for clipping\".format(local_sig_bad_pix, global_sig_bad_pix)\n #hdu[0].header['HISTORY'] = \"Bad pixel cutoff of {}sigma\".format(sig_bad_pix)\n hdu[0].header['HISTORY'] = \"A pixel value of 1 indicates a bad pixel\"\n hdu[0].header['HISTORY'] = \"############################\"\n\n if verbose:\n print((\"Writing bad pixel map to {}\".format(bp_outname)))\n #Write the fits file\n hdu.writeto(bp_outname, overwrite=True)\n\n return flat_outname, bp_outname", "def manipulations(path):\r\n\r\n print (\"\\n Working on %s\\n\" %(path))\r\n\r\n # Creates a folder with the results for the current image\r\n if not os.path.exists(\"Results\\\\%s\" %(path)):\r\n os.makedirs(\"Results\\\\%s\" %(path))\r\n\r\n # The variations made of the image\r\n func.pixelImage(path, 10, 10)\r\n func.animate(path)\r\n func.colorScale(path, 0)\r\n func.colorScale(path, 1)\r\n func.colorScale(path, 2)\r\n func.scan(path, 280)\r\n func.greyImage(path)\r\n func.colorSteps(path, 1)\r\n func.inverted(path)", "def preprocess_images():\n \n # Set up the lists to collect the images and measurements\n images = []\n measurements = []\n \n # Set up the path to the data files \n data_sets_path = 'data'\n data_sets = [os.path.join(data_sets_path, i) for i\n in os.listdir(data_sets_path)]\n \n # Step through the data folders and collect the images\n # and the steering angles\n for data_set in data_sets:\n lines = []\n \n # Open up the csv file of image paths and steering angles\n with open(os.path.join(data_set,\n 'driving_log.csv')) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n for line in lines:\n source_path = line[0]\n filename = source_path.split('\\\\')[-1]\n current_path = os.path.join(data_set, 'IMG',\n filename)\n \n # Import each image and change it to RGB\n BGR_image = 
cv2.imread(current_path)\n image = cv2.cvtColor(BGR_image, cv2.COLOR_BGR2RGB)\n rows, cols, depth = image.shape\n flipped_image = cv2.flip(image, 1)\n \n # Create a scaled version of the image\n scale = [0.9, 1.1]\n zoomfactor = random.choice(scale)\n scale_matrix = cv2.getRotationMatrix2D((cols/2, rows/2),\n 0, zoomfactor)\n scaled_image = cv2.warpAffine(image, scale_matrix,\n (cols, rows))\n\n # Append the images to the image list\n images.append(image)\n images.append(scaled_image)\n images.append(flipped_image)\n \n # Append the steering angle to the measurements list\n measurement = float(line[3])\n measurements.append(measurement)\n measurements.append(measurement)\n measurements.append(-1*measurement)\n \n return images, measurements", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... 
cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def create_azi_to_rad_sequence():\n num_tot = 30\n for i in range(2*num_tot + 1):\n angle_arr = azi_to_rad_transformation(512, i, 30)\n phase_arr = create_flat_phase(512, 0)\n delta_1_arr = create_delta_1(phase_arr, angle_arr)\n delta_2_arr = create_delta_2(angle_arr)\n cv2.imwrite('frame' + str(i) +'.tiff', delta_2_arr)\n print(\"Frame \" + str(i))", "def load_images_from_folder(folder, n_cases,patch_size, mask_path, mask_type, mask_name,normalize=False, imrotate=False):\n\n# # Initialize the arrays:\n# if imrotate: # number of images is 4 * n_im\n# bigy = np.empty((n_im * 4, 64, 64))\n# bigx = np.empty((n_im * 4, 64, 64, 2))\n# else:\n# bigy = np.empty((n_im, 64, 64))\n# bigx = np.empty((n_im, 64, 64, 2))\n\n# im = 0 # image counter\n bigy = []\n filenames = os.listdir(folder)\n\n for filename in filenames[n_cases[0]:n_cases[1]]:\n if not filename.startswith('.'):\n temp = loadmat(os.path.join(folder, filename))['res']\n print temp.shape\n # Clean the STONE sense recon data\n row, col = temp.shape\n temp = np.reshape(temp, (row, col, -1))\n #valid_mask = (np.abs(np.squeeze(temp[int(row/2), int(col/2), :])) != 0)\n #final_images = temp[:,:,valid_mask]\n final_images = temp\n \n# # Resize images\n #final_images = np.abs(final_images)\n final_images_resized = np.zeros((patch_size,patch_size,final_images.shape[2]))\n for i in range(final_images.shape[2]):\n final_images_resized[:,:,i] = cv2.resize(final_images[:,:,i], (patch_size,patch_size))\n \n# # Only take a small part of the data\n# final_images = final_images[140:180,140:180,:]\n \n# # Convert to abs values\n# final_images = np.abs(final_images)\n# \n# # Normalize based on single patient case\n# final_images = (final_images - np.mean(final_images)) / np.std(final_images)\n \n# bigy_temp = cv2.imread(os.path.join(folder, filename),\n# cv2.IMREAD_GRAYSCALE)\n \n \n bigy.append(final_images_resized)\n \n bigy = np.asarray(bigy)\n cases, row, col, imgs = bigy.shape\n bigy = np.transpose(np.reshape(np.transpose(bigy, (1,2,3,0)), (row, col, -1)), (2,0,1))\n \n # convert to k-space\n imgs, row, col = bigy.shape\n bigx = np.empty((imgs, row, col, 2))\n mask = read_mask(mask_path=mask_path,mask_type=mask_type,mask_name=mask_name,patch_size=patch_size,show_image=False)\n for i in range(imgs):\n bigx[i, :, :, :] = create_x(np.squeeze(bigy[i,:,:]),mask)\n \n # convert bigx from complex to abs values\n bigy = np.abs(bigy)\n \n# im += 1\n# if imrotate:\n# for angle in [90, 180, 270]:\n# bigy_rot = im_rotate(bigy_temp, angle)\n# bigx_rot = create_x(bigy_rot, normalize)\n# bigy[im, :, :] = bigy_rot\n# bigx[im, :, :, :] = bigx_rot\n# im += 1\n\n# if imrotate:\n# if im > (n_im * 4 - 1): # how many images to load\n# break\n# else:\n# if im > (n_im - 1): # how many images to load\n# break\n\n# if normalize:\n# bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx))\n\n return bigx, bigy", "def proc_modscag(fn_list, extent=None, t_srs=None):\n #Use cubic spline here for improve upsampling \n ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')\n stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list) \n #Create stack here - no need for most of mastack machinery, just make 
3D array\n #Mask values greater than 100% (clouds, bad pixels, etc)\n ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)\n\n stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)\n stack_count.set_fill_value(0)\n stack_min = ma_stack.min(axis=0).astype(np.uint8)\n stack_min.set_fill_value(0)\n stack_max = ma_stack.max(axis=0).astype(np.uint8)\n stack_max.set_fill_value(0)\n stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)\n stack_med.set_fill_value(0)\n\n out_fn = stack_fn + '_count.tif'\n iolib.writeGTiff(stack_count, out_fn, ds_list[0])\n out_fn = stack_fn + '_max.tif'\n iolib.writeGTiff(stack_max, out_fn, ds_list[0])\n out_fn = stack_fn + '_min.tif'\n iolib.writeGTiff(stack_min, out_fn, ds_list[0])\n out_fn = stack_fn + '_med.tif'\n iolib.writeGTiff(stack_med, out_fn, ds_list[0])\n\n ds = gdal.Open(out_fn)\n return ds", "def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. 
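# --- Illustrative aside (not part of the snippet above): the selection that follows
# simply keeps observations whose elevation is at or above the threshold worked out
# here and whose galactic latitude lies inside [minGlat, maxGlat], then averages them
# as the cold-sky reference. A minimal sketch with hypothetical (name, glat, el) records:
def select_cold(records, min_glat, max_glat, el_threshold):
    """records: iterable of (name, galactic_latitude_deg, elevation_deg) tuples."""
    return [name for name, glat, el in records
            if el >= el_threshold and min_glat <= glat <= max_glat]

demo = [("a.ast", 70.0, 65.0), ("b.ast", 10.0, 70.0), ("c.ast", 75.0, 40.0)]
print(select_cold(demo, 60.0, 90.0, 60.0))   # -> ['a.ast']
# --- end of aside ---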
#else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi", "def testSampleImageStats(self):\n \n imgfiles = []\n imgfiles.append(\"v1_i1_g_m400_s20_f.fits\")\n imgfiles.append(\"v1_i1_g_m400_s20_u16.fits\")\n imgfiles.append(\"v1_i2_g_m400_s20_f.fits\")\n imgfiles.append(\"v1_i2_g_m400_s20_u16.fits\")\n imgfiles.append(\"v2_i1_p_m9_f.fits\")\n imgfiles.append(\"v2_i1_p_m9_u16.fits\")\n imgfiles.append(\"v2_i2_p_m9_f.fits\")\n imgfiles.append(\"v2_i2_p_m9_u16.fits\")\n\n afwdataDir = os.getenv(\"AFWDATA_DIR\")\n if not afwdataDir:\n print >> sys.stderr, \"Skipping tests as afwdata is not setup\"\n return\n \n for imgfile in imgfiles:\n \n imgPath = os.path.join(afwdataDir, \"Statistics\", imgfile)\n\n # get the image and header\n dimg = afwImage.DecoratedImageF(imgPath)\n fitsHdr = dimg.getMetadata()\n\n # get the true values of the mean and stdev\n trueMean = fitsHdr.getAsDouble(\"MEANCOMP\")\n trueStdev = fitsHdr.getAsDouble(\"SIGCOMP\")\n\n # measure the mean and stdev with the Statistics class\n img = dimg.getImage()\n statobj = 
afwMath.makeStatistics(img, afwMath.MEAN | afwMath.STDEV)\n mean = statobj.getValue(afwMath.MEAN)\n stdev = statobj.getValue(afwMath.STDEV)\n\n # print trueMean, mean, trueStdev, stdev\n self.assertAlmostEqual(mean, trueMean, 8)\n self.assertAlmostEqual(stdev, trueStdev, 8)", "def baseline(spectra):\n\n return spectra - np.mean(spectra, axis=0)", "def get_resample(name: str) -> str:\n\n methods = {\n \"first\":\n \"\"\"\nimport numpy as np\n\ndef first(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in reversed(range(len(in_ar))):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"last\":\n \"\"\"\nimport numpy as np\n\ndef last(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in range(len(in_ar)):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"max\":\n \"\"\"\nimport numpy as np\n\ndef max(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.max(in_ar, axis=0)\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"average\":\n \"\"\"\nimport numpy as np\n\ndef average(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n div = np.zeros(in_ar[0].shape)\n for i in range(len(in_ar)):\n div += (in_ar[i] != 0)\n div[div == 0] = 1\n \n y = np.sum(in_ar, axis = 0, dtype = 'uint16')\n y = y / div\n \n np.clip(y,0,255, out = out_ar)\n\"\"\"}\n\n if name not in methods:\n raise ValueError(\n \"ERROR: Unrecognized resampling method (see documentation): '{}'.\".\n format(name))\n\n return methods[name]", "def avg_metric(sharp_path, deblurred_path): # TODO1 do multiprocessing in those methods\n sum_psnr = 0\n sum_mse = 0\n sum_ssim = 0\n\n # List all files\n files_orig = [f for f in listdir(sharp_path) if isfile(join(sharp_path, f))]\n files_deb = [f for f in listdir(deblurred_path) if isfile(join(deblurred_path, f))]\n\n count = 0\n for orig, deb in zip(files_orig, files_deb):\n orig_fn = join(sharp_path, orig)\n deb_fn = join(deblurred_path, deb)\n # Load images\n orig_img = cv2.imread(orig_fn)\n deb_img = cv2.imread(deb_fn)\n orig_img = np.divide(orig_img, 255)\n deb_img = np.divide(deb_img, 255)\n\n # Compute metrics\n sum_psnr += peak_signal_noise_ratio(orig_img, deb_img)\n sum_mse += mean_squared_error(orig_img, deb_img)\n sum_ssim += structural_similarity(orig_img, deb_img, multichannel=True)\n\n count += 1\n print('Analyzed: {}/{}'.format(count, len(files_orig)))\n\n # Average\n avg_psnr = sum_psnr/len(files_orig)\n avg_mse = sum_mse/len(files_orig)\n avg_ssim = sum_ssim/len(files_orig)\n\n return avg_mse, avg_psnr, avg_ssim", "def _load_volume_from_jpg(files: List[str]) -> np.ndarray:\n volume = []\n for file in files:\n img = cv.imread(file, cv.IMREAD_GRAYSCALE)\n volume.append(img)\n # plt.imshow(img, cmap='gray')\n # plt.show()\n volume = np.stack(volume)\n volume = volume / volume.max() * 1024\n return volume", "def stabilize_intensity(which_cam, cam, verbose=False):\n L = 0.5 # Correction Rate\n mags = np.ones(12) ### !\n ntraps = len(mags)\n iteration = 0\n while iteration < 5:\n iteration += 1\n print(\"Iteration \", iteration)\n\n im = cam.latest_frame()\n try:\n trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)\n except (AttributeError, ValueError) as e:\n print(\"No Bueno, error occurred during image analysis:\\n\", e)\n 
break\n\n mean_power = trap_powers.mean()\n rel_dif = 100 * trap_powers.std() / mean_power\n print(f'Relative Power Difference: {rel_dif:.2f} %')\n if rel_dif < 0.8:\n print(\"WOW\")\n break\n\n deltaP = [mean_power - P for P in trap_powers]\n dmags = [(dP / abs(dP)) * sqrt(abs(dP)) * L for dP in deltaP]\n mags = np.add(mags, dmags)\n print(\"Magnitudes: \", mags)\n break\n # self._update_magnitudes(mags)\n _ = analyze_image(im, ntraps, verbose=verbose)", "def getAverageL(image):\n # get image as numpy array \n im = np.array(image) \n \n # get shape \n w,h = im.shape \n \n # get average \n return np.average(im.reshape(w*h))", "def compute_TVL1(image_dir):\n image_list = sorted(os.listdir(image_dir))\n pre_image = cv2.imread()\n TVL1 = DualTVL1()\n flow = []\n for i in range(1, len(image_list)):\n cur_image = cv2.imread()\n cur_flow = TVL1.calc(pre_image, cur_image, None)\n pre_image = cur_image\n\n max_val = lambda x: max(max(x.flatten()), abs(min(x.flatten())))\n cur_flow = cur_flow / max_val(cur_flow)\n flow.append(cur_flow)\n flow = np.array(flow)\n return flow", "def augmentImageByRotation(imagePath, numRotations, originalBin, data_path):\n angles = np.linspace(0, 360, numRotations + 1, endpoint=False)[1:]\n augmentedImages = []\n rgb = cv2.imread(os.path.join(data_path, imagePath))\n dt = exrToNumpy(os.path.join(os.path.dirname(os.path.join(data_path, imagePath)), 'liver_0_dt.exr'))\n dl = exrToNumpy(os.path.join(os.path.dirname(os.path.join(data_path, imagePath)), 'liver_0_dl.exr'))\n newRatings = open(new_ratings_file_path, 'a')\n generated_images = 0\n for i, angle in enumerate(angles):\n # try different offsets if exact rotation does not give the same bin as the original image\n offsets = np.linspace(0, 10, 100, endpoint=False)\n newBin = None\n save_version = False\n for offset in offsets:\n rgb_r, dt_r, dl_r = rotate_image(rgb, dt, dl, angle + offset)\n # rate image\n rating, _ = rate_tumordistance_depth.rateImage(None, None, None, num_tumors, images=[rgb_r, dt_r, dl_r])\n newBin = getBinFromRating(rating, num_bins)\n # if bins match, save image\n if originalBin == newBin:\n save_version = True\n break\n if save_version:\n rotDir = os.path.join(augmentedDataPath, os.path.dirname(imagePath) + \"_rot\" + str(i))\n os.makedirs(rotDir)\n # save images to rotDir\n rgb_path = os.path.join(rotDir, 'liver_0.png')\n dt_path = os.path.join(rotDir, 'liver_0_dt.exr')\n dl_path = os.path.join(rotDir, 'liver_0_dl.exr')\n cv2.imwrite(rgb_path, rgb_r)\n save_exr_from_numpy(dt_path, dt_r)\n save_exr_from_numpy(dl_path, dl_r)\n # make entry in new ratings file\n save_path = os.path.relpath(rgb_path, data_path)\n newRatings.write(getRatingsLine(save_path, rating))\n generated_images += 1\n newRatings.close()\n if generated_images == 0:\n print \"Could not match bins. 
(\" + imagePath + \")\"\n return generated_images", "def shift_to_match(folder, x=0, y=0, z=0, angle=0, dim=120, energies=['40kVp', '80kVp'],\n directory='D:/Research/Python Data/CBCT/'):\n path = directory + folder + '/'\n\n for energy in energies:\n\n load_path = path + energy\n\n gof.create_folder(folder_name='Shifted Matrices', directory_path=load_path)\n\n load_path = load_path + '/RawMatrices/'\n save_path = path + energy + '/Shifted Matrices/'\n\n # Get all the slices to shift\n files = os.listdir(load_path)\n\n for file in files:\n temp = np.load(load_path + file)\n\n if energy is '40kVp':\n # Don't need to do anything for 40 kVp images\n np.save(save_path + file, temp)\n else:\n savefile = file\n # Shift within XY plane (the slice plane)\n if y is not 0:\n temp = np.roll(temp, y, axis=0) # Y shift\n if x is not 0:\n temp = np.roll(temp, x, axis=1) # X shift\n\n # Rotation\n if angle is not 0:\n index = np.round(np.abs(angle), decimals=0)\n index = int(index)\n temp = rotate(temp, angle)\n temp = temp[index:index + dim, index:index + dim]\n\n # Shift slices in the z (rename files)\n if z is not 0:\n file = file.replace('.npy', '')\n file = file.replace('volume0', '')\n file = int(file) + z\n if file < 10:\n savefile = 'volume000' + str(file) + '.npy'\n elif file < 100 and file >= 10:\n savefile = 'volume00' + str(file) + '.npy'\n else:\n savefile = 'volume0' + str(file) + '.npy'\n\n np.save(save_path + savefile, temp)", "def to_volume(self, verbose=True):\n images = self.load_all_dicom_images(verbose=verbose)\n\n volume = np.stack(\n [\n x.pixel_array * x.RescaleSlope + x.RescaleIntercept\n for x in images\n ],\n axis=-1,\n ).astype(np.int16)\n return volume", "def getEfficiency(path1, path2, path3, m = 1, pickle = 1, pathm0 = None, pathm1 = None, pathm2 = None): #, intV = 300):\n \n from wfAnalyseWave import pixelsize\n \n if pickle == 1:\n\n import pickle\n with open(path1, 'rb') as wav:\n w1 = pickle.load(wav)\n with open(path2, 'rb') as wav:\n w2 = pickle.load(wav)\n with open(path3, 'rb') as wav:\n w3 = pickle.load(wav)\n \n wf1 = Wavefront(srwl_wavefront=w1)\n wf2 = Wavefront(srwl_wavefront=w2)\n wf3 = Wavefront(srwl_wavefront=w3)\n \n p1 = pixelsize(wf1)\n p2 = pixelsize(wf2)\n p3 = pixelsize(wf3)\n \n pR1 = p1[0]/p2[0]\n pR2 = p1[0]/p3[0]\n pR3 = p2[0]/p3[0]\n \n print(\"pixel size at mask [m]: {}\".format(p1))\n print(\"pixel size after mask [m]: {}\".format(p2))\n print(\"pixel size after propagation [m]: {}\".format(p3))\n print(\"ratio of pixel sizes (p1/p2): {}\".format(pR1))\n print(\"ratio of pixel sizes (p1/p3): {}\".format(pR2))\n print(\"ratio of pixel sizes (p2/p3): {}\".format(pR3))\n \n \"\"\" Intensity from wavefield \"\"\"\n I0 = wf1.get_intensity()\n I1 = wf2.get_intensity()\n I2 = wf3.get_intensity()\n \n \"\"\" Total intensity at each plane \"\"\"\n I0_tot = np.sum(I0)/(p1[0]*p1[1]) #*p1[0]#6.25e-09*s0[0]*s0[1]\n I1_tot = np.sum(I1)/(p2[0]*p2[1]) #*p2[0]#*s1[0]*s1[1]\n I2_tot = np.sum(I2)/(p3[0]*p3[1]) #*p3[0]#*s2[0]*s1[1]\n \n \n else:\n \"\"\" Intensity from tif file \"\"\"\n I0 = path1 #getImageData(\"/home/jerome/Documents/MASTERS/data/wavefields/Efficiency/intensityIN.tif\")\n I1 = path2 #getImageData('/home/jerome/Documents/MASTERS/data/wavefields/Efficiency/intensityEX_1-2.tif')\n I2 = path3 #getImageData('/home/jerome/Documents/MASTERS/data/wavefields/Efficiency/intensityPR_1-2.tif') #getImageData('/home/jerome/WPG/intensityTot_maskprop.tif') \n \n \"\"\" Total intensity at each plane \"\"\"\n I0_tot = np.sum(I0) #*p1[0]#6.25e-09*s0[0]*s0[1]\n 
I1_tot = np.sum(I1) #*p2[0]#*s1[0]*s1[1]\n I2_tot = np.sum(I2) #*p3[0]#*s2[0]*s1[1]\n \n s0 = np.shape(I0)\n s1 = np.shape(I1)\n s2 = np.shape(I2)\n \n print(\"Shape of I (at mask): {}\".format(s0))\n print(\"Shape of I (after mask): {}\".format(s1))\n print(\"Shape of I (after propagation): {}\".format(s2))\n \n F0 = s0[0]/s1[0]\n F1 = s0[0]/s2[0]\n F2 = s1[0]/s2[0]\n \n print(\"pixel ratio (I0/I1): {}\".format(F0))\n print(\"pixel ratio (I0/I2): {}\".format(F1))\n print(\"pixel ratio (I1/I2): {}\".format(F2))\n \n if F0 != 1.0:\n print(\"WARNING! Number of pixels in intensity files does not match! Efficiency values may not be accurate!\")\n \n if F1 != 1.0:\n print(\"WARNING! Number of pixels in intensity files does not match! Efficiency values may not be accurate!\")\n \n if F2 != 1.0:\n print(\"WARNING! Number of pixels in intensity files does not match! Efficiency values may not be accurate!\")\n \n Ir0 = (I1_tot/I0_tot)#(F0**2)*(I1_tot/I0_tot) # ratio of intensity before & after mask\n Ir1 = (I2_tot/I0_tot) #(F1**2)*(I2_tot/I0_tot) # ratio of intensity before & after mask\n Ir2 = (I2_tot/I1_tot) #(F2**2)*(I2_tot/I1_tot) # ratio of intensity before & after mask\n \n print(\"Intensity Ratio I_ex/I_in: {}\".format(Ir0))\n print(\"Intensity Ratio I_prop/I_in: {}\".format(Ir1))\n print(\"Intensity Ratio I_prop/I_exit: {}\".format(Ir2))\n \n \n plt.imshow(I0)\n plt.title(\"at mask\")\n plt.colorbar()\n plt.show()\n \n plt.imshow(I1)\n plt.title(\"After mask\")\n plt.colorbar()\n plt.show() \n \n plt.imshow(I2)\n plt.title(\"after propagation\")\n plt.colorbar()\n plt.show()\n \n \n print(\" \")\n print(\"-----Total Intensity-----\")\n print(\"At mask: {}\".format(I0_tot))\n print(\"After mask: {}\".format(I1_tot))\n print(\"After propagation: {}\".format(I2_tot))\n\n \"\"\" Defining region of interest to inspect separate orders \"\"\" \n Mi = int((s2[0]/2)-300) #initial position for order sampling\n Mf = int((s2[0]/2)+300) #final position for order sampling\n \n print(\"coordinates for start and end of each order: {}\".format((Mi,Mf)))\n \n \"\"\"Finding each order\"\"\"\n \n intV = int(s2[0]/(2*m+1)) #500 # Number of pixels for segmentation interval \n \n if m >= 1:\n # region for m=0 \n ROI_0 = ((int((s2[0]/2)-(intV/2)),Mi),((int((s2[0]/2)+(intV/2))),Mf)) \n # region for m=+1\n ROI_1 = ((ROI_0[1][0], Mi),(ROI_0[1][0] + intV, Mf))\n # region for m=-1\n ROI_n1 =((ROI_0[0][0]-intV, Mi),(ROI_0[0][0], Mf))\n if m >= 2: \n # region for m=+2\n ROI_2 = ((ROI_1[1][0], Mi),(ROI_1[1][0] + intV, Mf))\n # region for m=-2\n ROI_n2 = ((ROI_n1[0][0]-intV, Mi),(ROI_n1[0][0], Mf))\n if m >= 3: \n # region for m=+3 \n ROI_3 = ((ROI_2[1][0], Mi),(ROI_2[1][0] + intV, Mf))\n # region for m=-3\n ROI_n3 = ((ROI_n2[0][0]-intV, Mi),(ROI_n2[0][0], Mf))\n if m >= 4: \n # region for m=+4\n ROI_4 = ((ROI_3[1][0], Mi),(ROI_3[1][0] + intV, Mf))\n # region for m=-4\n ROI_n4 = ((ROI_n3[0][0]-intV, Mi),(ROI_n3[0][0], Mf))\n \n \n \n x0_0,y0_0 = ROI_0[0][0], ROI_0[0][1]\n x1_0,y1_0 = ROI_0[1][0], ROI_0[1][1]\n \n x0_1,y0_1 = ROI_1[0][0], ROI_1[0][1]\n x1_1,y1_1 = ROI_1[1][0], ROI_1[1][1]\n \n x0_n1,y0_n1 = ROI_n1[0][0], ROI_n1[0][1]\n x1_n1,y1_n1 = ROI_n1[1][0], ROI_n1[1][1] \n \n try:\n x0_2,y0_2 = ROI_2[0][0], ROI_2[0][1]\n x1_2,y1_2 = ROI_2[1][0], ROI_2[1][1]\n \n x0_n2,y0_n2 = ROI_n2[0][0], ROI_n2[0][1]\n x1_n2,y1_n2 = ROI_n2[1][0], ROI_n2[1][1]\n \n x0_3,y0_3 = ROI_3[0][0], ROI_3[0][1]\n x1_3,y1_3 = ROI_3[1][0], ROI_3[1][1]\n \n x0_n3,y0_n3 = ROI_n3[0][0], ROI_n3[0][1]\n x1_n3,y1_n3 = ROI_n3[1][0], ROI_n3[1][1] \n \n 
x0_4,y0_4 = ROI_4[0][0], ROI_4[0][1]\n x1_4,y1_4 = ROI_4[1][0], ROI_4[1][1]\n \n x0_n4,y0_n4 = ROI_n4[0][0], ROI_n4[0][1]\n x1_n4,y1_n4 = ROI_n4[1][0], ROI_n4[1][1]\n except NameError:\n pass\n \n \n A_0 = I2[y0_0:y1_0,x0_0:x1_0]\n A_1 = I2[y0_1:y1_1,x0_1:x1_1]\n A_n1 = I2[y0_n1:y1_n1,x0_n1:x1_n1] \n try:\n A_2 = I2[y0_2:y1_2,x0_2:x1_2]\n A_n2 = I2[y0_n2:y1_n2,x0_n2:x1_n2]\n A_3 = I2[y0_3:y1_3,x0_3:x1_3]\n A_n3 = I2[y0_n3:y1_n3,x0_n3:x1_n3]\n A_4 = I2[y0_4:y1_4,x0_4:x1_4]\n A_n4 = I2[y0_n4:y1_n4,x0_n4:x1_n4]\n except NameError:\n pass\n \n plt.imshow(A_0)\n plt.title('m=0')\n plt.colorbar()\n if pathm0 != None:\n print(\"Saving m=0 figure to path: {}\".format(pathm0))\n plt.savefig(pathm0)\n plt.show() \n \n plt.imshow(A_1)\n plt.title('m=+1')\n plt.colorbar()\n if pathm1 != None:\n print(\"Saving m=1 figure to path: {}\".format(pathm1))\n plt.savefig(pathm1)\n plt.show()\n \n plt.imshow(A_n1)\n plt.title('m=-1')\n plt.colorbar()\n plt.show() \n \n try:\n plt.imshow(A_2)\n plt.title('m=+2')\n plt.colorbar()\n if pathm2 != None:\n print(\"Saving m=2 figure to path: {}\".format(pathm2))\n plt.savefig(pathm2)\n plt.show()\n \n plt.imshow(A_n2)\n plt.title('m=-2')\n plt.colorbar()\n plt.show() \n \n plt.imshow(A_3)\n plt.title('m=+3')\n plt.colorbar()\n plt.show()\n \n plt.imshow(A_n3)\n plt.title('m=-3')\n plt.colorbar()\n plt.show()\n \n plt.imshow(A_4)\n plt.title('m=+4')\n plt.colorbar()\n plt.show()\n \n plt.imshow(A_n4)\n plt.title('m=-4')\n plt.colorbar()\n plt.show()\n except NameError:\n pass\n \n Im_0 = np.sum(A_0)\n Im_1 = np.sum(A_1)\n Im_n1 = np.sum(A_n1)\n try:\n Im_2 = np.sum(A_2)/Ir2\n Im_n2 = np.sum(A_n2)/Ir2\n Im_3 = np.sum(A_3)/Ir2\n Im_n3 = np.sum(A_n3)/Ir2\n Im_4 = np.sum(A_4)/Ir2\n Im_n4 = np.sum(A_n4)/Ir2\n except NameError:\n pass\n \n print(\" \")\n print(\"----- Intensity of m = 0-----\")\n print(\"Im_1: {}\".format(Im_0))\n print(\" \")\n print(\"----- Intensity of m = +1-----\")\n print(\"Im_1: {}\".format(Im_1))\n print(\" \")\n print(\"----- Intensity of m = -1-----\")\n print(\"Im_n1: {}\".format(Im_n1)) \n try:\n print(\" \")\n print(\"----- Intensity of m = +2-----\")\n print(\"Im_2: {}\".format(Im_2))\n print(\" \")\n print(\"----- Intensity of m = -2-----\")\n print(\"Im_n2: {}\".format(Im_n2)) \n print(\" \")\n print(\"----- Intensity of m = +3-----\")\n print(\"Im_3: {}\".format(Im_3))\n print(\" \")\n print(\"----- Intensity of m = -3-----\")\n print(\"Im_n3: {}\".format(Im_n3)) \n print(\" \")\n print(\"----- Intensity of m = +4-----\")\n print(\"Im_4: {}\".format(Im_4))\n print(\" \")\n print(\"----- Intensity of m = -4-----\")\n print(\"Im_n4: {}\".format(Im_n4))\n except NameError:\n pass\n \n if pickle == 1:\n \"\"\" Get Efficiency of each order \"\"\" # Not sure if should be dividing by total intensity at mask or after mask\n E0 = (Im_0/I0_tot)/p3[0] #p3[0]*(Im_0/I0_tot)\n E1 = (Im_1/I0_tot)/p3[0] # p3[0]*(Im_1/I0_tot)/p3[0] #\n En1 = (Im_n1/I0_tot)/p3[0] # p3[0]*(Im_n1/I0_tot)/p3[0] #\n \n try:\n E2 = p3[0]*(Im_2/I0_tot)\n En2 = p3[0]*(Im_n2/I0_tot)\n E3 = p3[0]*(Im_3/I0_tot)\n En3 = p3[0]*(Im_n3/I0_tot)\n E4 = p3[0]*(Im_4/I0_tot)\n En4 = p3[0]*(Im_n4/I0_tot)\n except NameError:\n pass\n else:\n \"\"\" Get Efficiency of each order \"\"\" # Not sure if should be dividing by total intensity at mask or after mask\n E0 = (Im_0/I0_tot)\n E1 = (Im_1/I0_tot)\n En1 = (Im_n1/I0_tot)\n \n try:\n E2 = (Im_2/I0_tot)\n En2 = (Im_n2/I0_tot)\n E3 = (Im_3/I0_tot)\n En3 = (Im_n3/I0_tot)\n E4 = (Im_4/I0_tot)\n En4 = (Im_n4/I0_tot)\n except NameError:\n pass\n \n 
print(\" \")\n print(\"Efficiency of m=0 order: {}\".format(E0))\n print(\"Efficiency of m=+1 order: {}\".format(E1))\n print(\"Efficiency of m=-1 order: {}\".format(En1))\n try:\n print(\"Efficiency of m=+2 order: {}\".format(E2))\n print(\"Efficiency of m=-2 order: {}\".format(En2))\n print(\"Efficiency of m=+3 order: {}\".format(E3))\n print(\"Efficiency of m=-3 order: {}\".format(En3))\n print(\"Efficiency of m=+4 order: {}\".format(E4))\n print(\"Efficiency of m=-4 order: {}\".format(En4))\n except NameError:\n pass", "def find_arms(path,fr_nb):\n im=open_frame(path,fr_nb)\n img=im.copy()\n im=img_as_ubyte(im)\n mask_h = hysteresis_thresholding(img,6,10)\n \n ksize=5\n kernel = np.ones((ksize,ksize),dtype = np.uint8)\n kernel = skimage.morphology.disk(ksize)\n \n mask = cv2.morphologyEx(mask_h, cv2.MORPH_OPEN, kernel,iterations=2)\n \n arms = mask_h-mask\n \"\"\"\n lab,_ = ndi.label(diff)\n \n arms = skimage.morphology.remove_small_objects(lab,60)\"\"\" #Only temporary, to track only the biggest\n return mask,arms", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n 
cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def ps(image):\n\timage = image.astype(float)\n\tps_img = abs(pow(fft2(image), 2))\n\treturn ps_img", "def pavs (dirName,pat,dx,dy):\r\n ntotpat=0\r\n\r\n tabf=np.zeros((dx,dy),np.uint8)\r\n _tabsroi=np.zeros((dx,dy,3),np.uint8)\r\n _tabscan = np.zeros((dx,dy),np.int16)\r\n\r\n (top,tail)=os.path.split(dirName)\r\n print 'pav :',tail,'pattern :',pat\r\n patpickle=[]\r\n nampadir=os.path.join(patchpath,pat)\r\n nampadirl=os.path.join(nampadir,locabg)\r\n if not os.path.exists(nampadir):\r\n os.mkdir(nampadir)\r\n os.mkdir(nampadirl)\r\n\r\n pathpicklepat=os.path.join(picklepathdir,pat)\r\n# print pathpicklepat\r\n pathpicklepatl=os.path.join(pathpicklepat,locabg)\r\n patchpicklenamepatient=tail+'_'+patchpicklename\r\n\r\n pathpicklepatfile=os.path.join(pathpicklepatl,patchpicklenamepatient)\r\n if not os.path.exists(pathpicklepat):\r\n os.mkdir(pathpicklepat)\r\n if not os.path.exists(pathpicklepatl):\r\n os.mkdir(pathpicklepatl)\r\n if os.path.exists(pathpicklepatfile):\r\n os.remove(pathpicklepatfile)\r\n\r\n for scannumb in range (0,dy):\r\n tabp = np.zeros((dx, dy), dtype=np.uint8)\r\n tabf=np.copy(tabroipat3d[pat][scannumb])\r\n\r\n tabfc=np.copy(tabf)\r\n nbp=0\r\n if tabf.max()>0:\r\n vis=contour2(tabf,pat,dx,dy)\r\n if vis.sum()>0:\r\n _tabsroi = np.copy(tabsroi3d[scannumb])\r\n imn=cv2.add(vis,_tabsroi)\r\n imn=tagview(imn,pat,0,20)\r\n tabsroi3d[scannumb]=imn\r\n imn = cv2.cvtColor(imn, cv2.COLOR_BGR2RGB)\r\n\r\n sroifile='tr_'+str(scannumb)+'.'+typeroi\r\n filenamesroi=os.path.join(sroidir,sroifile)\r\n cv2.imwrite(filenamesroi,imn)\r\n\r\n np.putmask(tabf,tabf>0,1)\r\n\r\n atabf = np.nonzero(tabf)\r\n\r\n xmin=atabf[1].min()\r\n xmax=atabf[1].max()\r\n ymin=atabf[0].min()\r\n ymax=atabf[0].max()\r\n\r\n\r\n _tabscan=tabscan3d[scannumb]\r\n\r\n i=xmin\r\n while i <= xmax:\r\n j=ymin\r\n while j<=ymax:\r\n tabpatch=tabf[j:j+dimpavy,i:i+dimpavx]\r\n\r\n area= tabpatch.sum()\r\n targ=float(area)/pxy\r\n\r\n if targ >thrpatch:\r\n imgray = _tabscan[j:j+dimpavy,i:i+dimpavx]\r\n imagemax= cv2.countNonZero(imgray)\r\n min_val, max_val, min_loc,max_loc = cv2.minMaxLoc(imgray)\r\n\r\n if imagemax > 0 and max_val - min_val>2:\r\n nbp+=1\r\n patpickle.append(imgray)\r\n x=0\r\n #we draw the rectange\r\n while x < dimpavx:\r\n y=0\r\n while y < dimpavy:\r\n tabp[y+j][x+i]=150\r\n if x == 0 or x == dimpavx-1 :\r\n y+=1\r\n else:\r\n y+=dimpavy-1\r\n x+=1\r\n #we cancel the source\r\n tabf[j:j+dimpavy,i:i+dimpavx]=0\r\n j+=dimpavy-1\r\n j+=1\r\n i+=1\r\n\r\n if nbp>0:\r\n tabfc =tabfc+tabp\r\n ntotpat=ntotpat+nbp\r\n if scannumb not in listsliceok:\r\n listsliceok.append(scannumb)\r\n stw=tail+'_slice_'+str(scannumb)+'_'+pat+'_'+locabg+'_'+str(nbp)\r\n stww=stw+'.txt'\r\n flw=os.path.join(jpegpath,stww)\r\n mfl=open(flw,\"w\")\r\n mfl.write('#number of patches: '+str(nbp)+'\\n')\r\n mfl.close()\r\n stww=stw+'.'+typej\r\n flw=os.path.join(jpegpath,stww)\r\n scipy.misc.imsave(flw, tabfc)\r\n pickle.dump(patpickle, open(pathpicklepatfile, \"wb\"),protocol=-1)\r\n\r\n return ntotpat", "def 
meanSpectrum(img='g35.03_KDnh3_11.hline.self.image', nBaselineChannels=16,\n sigmaCube=3, verbose=False, nanBufferChannels=2, useAbsoluteValue=False,\n baselineMode='edge', percentile=20, continuumThreshold=None,\n meanSpectrumFile='', centralArcsec=-1, imageInfo=[], chanInfo=[], mask='',\n meanSpectrumMethod='peakOverRms', peakFilterFWHM=15, iteration=0, applyMaskToMask=False):\n if (not os.path.exists(img)):\n casalogPost(\"Could not find image = %s\" % (img))\n return\n myia = createCasaTool(iatool)\n usermaskdata = ''\n if (len(mask) > 0):\n # This is the user mask (not the minpb mask inside the cube).\n myia.open(mask)\n usermaskdata = myia.getregion()\n if (verbose): print \"shape(usermask) = \", np.array(np.shape(usermaskdata))\n if applyMaskToMask:\n usermaskmask = myia.getregion(getmask=True)\n idx = np.where(usermaskmask==False)\n casalogPost('applyMaskToMask has zeroed out %d pixels.' % (len(idx[0])))\n usermaskdata[idx] = 0\n maskAxis = findSpectralAxis(myia)\n if (np.shape(usermaskdata)[maskAxis] > 1):\n singlePlaneUserMask = False\n else:\n singlePlaneUserMask = True\n if (meanSpectrumMethod.find('meanAboveThreshold') >= 0):\n casalogPost(\"single plane user masks not supported by meanSpectrumMethod='meanAboveThreshold', try peakOverMad.\")\n myia.close()\n return\n myia.close()\n myia.open(img)\n axis = findSpectralAxis(myia)\n if verbose: print \"Found spectral axis = \", axis\n myrg = None\n if (centralArcsec < 0 or centralArcsec == 'auto'):\n centralArcsec = -1\n if (len(mask) > 0 or meanSpectrumMethod != 'peakOverMad'):\n pixels = myia.getregion()\n maskdata = myia.getregion(getmask=True)\n nchan = np.shape(maskdata)[axis]\n else:\n bmaj, bmin, bpa, cdelt1, cdelt2, naxis1, naxis2, freq = imageInfo\n blc = [0,0,0,0]\n trc = [naxis1-1,naxis2-1,0,0]\n nchan = chanInfo[0]\n myrg = createCasaTool(rgtool)\n else:\n myrg = createCasaTool(rgtool)\n bmaj, bmin, bpa, cdelt1, cdelt2, naxis1, naxis2, freq = imageInfo\n nchan = chanInfo[0]\n x0 = int(np.round(naxis1*0.5 - centralArcsec*0.5/np.abs(cdelt1)))\n x1 = int(np.round(naxis1*0.5 + centralArcsec*0.5/np.abs(cdelt1)))\n y0 = int(np.round(naxis2*0.5 - centralArcsec*0.5/cdelt2))\n y1 = int(np.round(naxis2*0.5 + centralArcsec*0.5/cdelt2))\n # avoid going off the edge of non-square images\n if (x0 < 0): x0 = 0\n if (y0 < 0): y0 = 0\n if (x0 >= naxis1): x0 = naxis1 - 1\n if (y0 >= naxis2): y0 = naxis2 - 1\n blc = [x0,y0,0,0]\n trc = [x1,y1,0,0]\n trc[axis] = nchan\n region = myrg.box(blc=blc, trc=trc)\n pixels = myia.getregion(region=region)\n casalogPost(\"Taking submask for central area of image: blc=%s, trc=%s\" % (str(blc),str(trc)))\n maskdata = myia.getregion(region=region,getmask=True)\n# myrg.done()\n if (len(mask) > 0):\n usermaskdata = submask(usermaskdata, region)\n if verbose:\n print \"shape of pixels = \", np.array(np.shape(pixels))\n if len(mask) > 0:\n if not (np.array(np.shape(pixels)) == np.array(np.shape(usermaskdata))).all():\n casalogPost(\"Mismatch in shape between image (%s) and mask (%s)\" % (np.shape(pixels),np.shape(usermaskdata)))\n if myrg is not None: myrg.done()\n return\n if (meanSpectrumMethod.find('OverRms') > 0 or meanSpectrumMethod.find('OverMad') > 0):\n # compute myrms, ignoring masked values and usermasked values\n if (meanSpectrumMethod.find('OverMad') < 0):\n casalogPost(\"Computing std on each plane\")\n else:\n casalogPost(\"Computing mad on each plane\")\n myvalue = []\n# for a in range(np.shape(pixels)[axis]):\n for a in range(nchan):\n if ((a+1)%100 == 0): \n print \"Done %d/%d\" % (a+1, 
nchan)\n# print \"Done %d/%d\" % (a+1, np.shape(pixels)[axis])\n # Extract this one channel\n if (axis == 2):\n if len(mask) > 0:\n mypixels = pixels[:,:,a,0]\n mymask = maskdata[:,:,a,0]\n if (singlePlaneUserMask):\n myusermask = usermaskdata[:,:,0,0]\n else:\n myusermask = usermaskdata[:,:,a,0]\n else:\n blc[axis] = a\n trc[axis] = a\n myregion = myrg.box(blc=blc,trc=trc)\n mypixels = myia.getregion(region=myregion)\n mymask = myia.getregion(region=myregion,getmask=True)\n elif (axis == 3):\n if (len(mask) > 0):\n mypixels = pixels[:,:,0,a]\n mymask = maskdata[:,:,0,a]\n if (singlePlaneUserMask):\n myusermask = usermaskdata[:,:,0,0]\n else:\n myusermask = usermaskdata[:,:,0,a]\n else:\n blc[axis] = a\n trc[axis] = a\n myregion = myrg.box(blc=blc,trc=trc)\n mypixels = myia.getregion(region=myregion)\n mymask = myia.getregion(region=myregion,getmask=True)\n \n if (len(mask) > 0):\n # user mask is typically a clean mask, so we want to use the region outside the\n # clean mask for computing the MAD, but also avoiding the masked edges of the image,\n # which are generally masked to False\n pixelsForStd = mypixels[np.where((myusermask<1) * (mymask==True))]\n else: \n # avoid the masked (typically outer) edges of the image using the built-in mask\n pixelsForStd = mypixels[np.where(mymask==True)]\n if (meanSpectrumMethod.find('OverMad') < 0):\n myvalue.append(np.std(pixelsForStd))\n else:\n myvalue.append(MAD(pixelsForStd))\n# print \"channel %4d: Using %d of %d pixels for MAD/std\" % (a,len(pixelsForStd),np.prod(np.shape(mypixels)))\n if (meanSpectrumMethod.find('OverMad') < 0):\n myrms = np.array(myvalue)\n else:\n mymad = np.array(myvalue)\n print \"Finished\"\n percentagePixelsNotMasked = 100\n if (meanSpectrumMethod.find('peakOver') == 0):\n # compute mymax (an array of channel maxima), then divide by either myrms or mymad array\n gaussianSigma = peakFilterFWHM/2.355\n myvalue = []\n casalogPost(\"B) Current memory usage: %.3f GB, resident: %.3f GB\" % (memoryUsage(), residentMemoryUsage()))\n casalogPost(\"Smoothing and computing peak on each plane.\")\n if (len(mask) > 0):\n pixels[np.where(usermaskdata==0)] = np.nan\n for a in range(nchan):\n if ((a+1)%100 == 0): \n print \"Done %d/%d\" % (a+1, nchan)\n if (axis == 2):\n if len(mask) > 0:\n mypixels = pixels[:,:,a,0]\n else:\n blc[axis] = a\n trc[axis] = a\n myregion = myrg.box(blc=blc,trc=trc)\n mypixels = myia.getregion(region=myregion)\n elif (axis == 3):\n if len(mask) > 0:\n mypixels = pixels[:,:,0,a]\n else:\n blc[axis] = a\n trc[axis] = a\n myregion = myrg.box(blc=blc,trc=trc)\n mypixels = myia.getregion(region=myregion)\n if (gaussianSigma > 1.1/2.355):\n if (len(mask) > 0):\n # taken from stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python\n V = mypixels.copy()\n V[mypixels!=mypixels] = 0\n VV = gaussian_filter(V,sigma=gaussianSigma)\n W = mypixels.copy()+1\n W[mypixels!=mypixels] = 0\n WW = gaussian_filter(W,sigma=gaussianSigma)\n mypixels = VV/WW\n myvalue.append(np.nanmax(mypixels))\n else:\n myvalue.append(np.nanmax(gaussian_filter(mypixels,sigma=gaussianSigma)))\n else:\n myvalue.append(np.nanmax(mypixels))\n print \"finished\"\n mymax = np.array(myvalue)\n if (meanSpectrumMethod == 'peakOverRms'):\n avgspectrum = mymax/myrms\n elif (meanSpectrumMethod == 'peakOverMad'):\n avgspectrum = mymax/mymad\n nansRemoved = removeNaNs(avgspectrum, verbose=True)\n threshold = 0\n edgesUsed = 0\n nansReplaced,nanmin = removeNaNs(avgspectrum, replaceWithMin=True, \n nanBufferChannels=nanBufferChannels, 
verbose=True)\n elif (meanSpectrumMethod.find('meanAboveThreshold') == 0):\n if (continuumThreshold is not None):\n belowThreshold = np.where(pixels < continuumThreshold)\n if verbose:\n print \"shape of belowThreshold = \", np.shape(belowThreshold)\n pixels[belowThreshold] = 0.0\n if (len(mask) > 0):\n pixelsWithinUserMask = len(np.where(usermaskdata<1)[0])\n pixelsWithinCubeMask = len(np.where(maskdata==1)[0])\n pixelsForMAD = pixels[np.where((maskdata==1) * (usermaskdata<1))]\n npixels = np.prod(np.shape(pixels))\n percent = 100.*len(pixelsForMAD) / npixels\n percent2 = 100.*pixelsWithinUserMask/npixels\n percent3 = 100.*pixelsWithinCubeMask/npixels\n casalogPost(\"Using %d of %d pixels (%.2f%%) for MAD: %d (%.2f%%) outside user mask, %d (%.2f%%) satisfy cube mask, i.e. minpb masking\" % (len(pixelsForMAD),npixels, percent, pixelsWithinUserMask, percent2, pixelsWithinCubeMask, percent3))\n else:\n pixelsForMAD = pixels[np.where(maskdata==1)] # ignore the outer mask edges of the cube\n casalogPost(\"Using %d of %d pixels for MAD\" % (len(pixelsForMAD),np.prod(np.shape(pixels))))\n# pixelsForMAD = pixels # previous method\n madTime = timeUtilities.time()\n std = MAD(pixelsForMAD, axis=None)\n endMadTime = timeUtilities.time()\n casalogPost(\"%.1f sec elapsed in computing MAD within meanSpectrum()\" % (endMadTime-madTime))\n if verbose: print \"MAD of cube = \", std\n naxes = len(np.shape(pixels))\n nchan = np.shape(pixels)[axis]\n\n if (baselineMode == 'edge'):\n # Method #1: Use the two edges of the spw to find the line-free rms of the spectrum\n nEdgeChannels = nBaselineChannels/2\n # lower edge\n blc = np.zeros(naxes)\n trc = [i-1 for i in list(np.shape(pixels))]\n trc[axis] = nEdgeChannels\n myrg = createCasaTool(rgtool)\n region = myrg.box(blc=blc, trc=trc)\n lowerEdgePixels = myia.getregion(region=region)\n # drop all floating point zeros (which will drop pixels outside the mosaic image mask)\n lowerEdgePixels = lowerEdgePixels[np.where(lowerEdgePixels!=0.0)]\n stdLowerEdge = MAD(lowerEdgePixels)\n medianLowerEdge = nanmedian(lowerEdgePixels)\n if verbose: print \"MAD of %d channels on lower edge = %f\" % (nBaselineChannels, stdLowerEdge)\n\n # upper edge\n blc = np.zeros(naxes)\n trc = [i-1 for i in list(np.shape(pixels))]\n blc[axis] = trc[axis] - nEdgeChannels\n region = myrg.box(blc=blc, trc=trc)\n upperEdgePixels = myia.getregion(region=region)\n# myrg.done()\n # drop all floating point zeros\n upperEdgePixels = upperEdgePixels[np.where(upperEdgePixels!=0.0)]\n stdUpperEdge = MAD(upperEdgePixels)\n medianUpperEdge = nanmedian(upperEdgePixels)\n casalogPost(\"meanSpectrum(): edge medians: lower=%.10f, upper=%.10f\" % (medianLowerEdge, medianUpperEdge))\n\n if verbose: \n print \"MAD of %d channels on upper edge = %f\" % (nEdgeChannels, stdUpperEdge)\n if (stdLowerEdge <= 0.0):\n edgesUsed = 1\n stdEdge = stdUpperEdge\n medianEdge = medianUpperEdge\n elif (stdUpperEdge <= 0.0):\n edgesUsed = 0\n stdEdge = stdLowerEdge\n medianEdge = medianLowerEdge\n else:\n edgesUsed = 2\n stdEdge = np.mean([stdLowerEdge,stdUpperEdge])\n medianEdge = np.mean([medianLowerEdge,medianUpperEdge])\n \n if (baselineMode != 'edge'):\n # Method #2: pick the N channels with the lowest absolute values (to avoid\n # confusion from absorption lines and negative bowls of missing flux)\n npixFraction = nBaselineChannels*1.0/nchan\n if (centralArcsec < 0):\n allPixels = myia.getregion()\n else:\n allPixels = pixels\n myia.close()\n # Convert all NaNs to zero\n allPixels[np.isnan(allPixels)] = 0\n # Drop all 
floating point zeros and internally-masked pixels from calculation\n if (mask == ''):\n allPixels = allPixels[np.where((allPixels != 0) * (maskdata==True))]\n else:\n # avoid identical zeros and clean mask when looking for lowest pixels\n allPixels = allPixels[np.where((allPixels != 0) * (maskdata==True) * (usermaskdata<1))]\n # Take absolute value\n absPixels = np.abs(allPixels)\n # Find the lowest pixel values\n percentileThreshold = scoreatpercentile(absPixels, percentile)\n idx = np.where(absPixels < percentileThreshold)\n # Take their statistics\n stdMin = MAD(allPixels[idx])\n medianMin = nanmedian(allPixels[idx])\n\n if (baselineMode == 'edge'):\n std = stdEdge\n median = medianEdge\n casalogPost(\"meanSpectrum(): edge mode: median=%f MAD=%f threshold=%f (edgesUsed=%d)\" % (medianEdge, stdEdge, medianEdge+stdEdge*sigmaCube, edgesUsed))\n else:\n std = stdMin\n median = medianMin\n edgesUsed = 0\n casalogPost(\"meanSpectrum(): min mode: median=%f MAD=%f threshold=%f\" % (medianMin, stdMin, medianMin+stdMin*sigmaCube))\n \n if (axis == 2 and naxes == 4):\n # drop the degenerate axis so that avgOverCube will work with nanmean(axis=0)\n pixels = pixels[:,:,:,0]\n if (len(mask) > 0):\n maskdata = propagateMaskToAllChannels(maskdata, axis)\n else:\n maskdata = ''\n avgspectrum, percentagePixelsNotMasked = avgOverCube(pixels, useAbsoluteValue, mask=maskdata, usermask=usermaskdata)\n if meanSpectrumMethod.find('OverRms') > 0:\n avgspectrum /= myrms\n elif meanSpectrumMethod.find('OverMad') > 0:\n avgspectrum /= mymad\n threshold = median + sigmaCube*std\n casalogPost(\"Using threshold above which to compute mean spectrum = %f\" % (threshold), verbose)\n pixels[np.where(pixels < threshold)] = 0.0\n casalogPost(\"Running avgOverCube\")\n avgspectrumAboveThreshold, percentagePixelsNotMasked = avgOverCube(pixels, useAbsoluteValue, threshold, mask=maskdata, usermask=usermaskdata)\n if meanSpectrumMethod.find('OverRms') > 0:\n avgspectrumAboveThreshold /= myrms\n elif meanSpectrumMethod.find('OverMad') > 0:\n avgspectrumAboveThreshold /= mymad\n if verbose: \n print \"Running removeNaNs (len(avgspectrumAboveThreshold)=%d)\" % (len(avgspectrumAboveThreshold))\n nansRemoved = removeNaNs(avgspectrumAboveThreshold)\n nansReplaced,nanmin = removeNaNs(avgspectrumAboveThreshold, replaceWithMin=True, \n nanBufferChannels=nanBufferChannels)\n nchan, firstFreq, lastFreq, channelWidth = chanInfo\n frequency = np.linspace(firstFreq, lastFreq, nchan)\n if verbose: \n print \"Running writeMeanSpectrum\"\n writeMeanSpectrum(meanSpectrumFile, frequency, avgspectrum, nansReplaced, threshold,\n edgesUsed, nchan, nanmin, centralArcsec, mask, iteration)\n if (myrg is not None): myrg.done()\n return(avgspectrum, nansRemoved, nansReplaced, threshold, \n edgesUsed, nchan, nanmin, percentagePixelsNotMasked)", "def airy_and_slicer(surface, wavelength, scale_mas, PSF_window, N_window):\n\n # Print message to know we are updating the cache\n print('Recalculating Airy Pattern for %.3f microns' % wavelength)\n\n # Plate scales [Px, Py] for each spaxel scale in mm / arcsec,\n # depending on the surface [IS: Image Slicer, DET: Detector]\n plate_scales = {'IS': {4.0: [125, 250], 60.0: [16.67, 16.67]},\n 'DET': {4.0: [3.75, 7.5], 60.0: [0.5, 0.5]}}\n plate_x = plate_scales[surface][scale_mas][0]\n plate_y = plate_scales[surface][scale_mas][1]\n\n # We know how many Microns the pixels of the Geometric PSF span [PSF_window / N_window]\n pix_sampling = PSF_window / N_window # micron at the detector plane\n # Using the plate scale 
we calculate how many m.a.s each of those pixels have to span\n pix_scale_x = pix_sampling / plate_x # milliarcsec / pixel\n pix_scale_y = pix_sampling / plate_y # milliarcsec / pixel\n\n # Calculate the relative size of the pupil aperture needed to ensure the PSF is\n # sampled with the given pix_scale at the focal plane\n ELT_DIAM = 39\n MILIARCSECS_IN_A_RAD = 206265000\n pix_rad_x = pix_scale_x / MILIARCSECS_IN_A_RAD # radians / pixel\n pix_rad_y = pix_scale_y / MILIARCSECS_IN_A_RAD\n RHO_APER_x = pix_rad_x * ELT_DIAM / (wavelength * 1e-6)\n RHO_APER_y = pix_rad_y * ELT_DIAM / (wavelength * 1e-6)\n RHO_OBSC_x = 0.30 * RHO_APER_x # ELT central obscuration\n RHO_OBSC_y = 0.30 * RHO_APER_y # ELT central obscuration\n\n # Sanity check\n PIX_RAD_x = RHO_APER_x * wavelength / ELT_DIAM * 1e-6\n PIX_RAD_y = RHO_APER_y * wavelength / ELT_DIAM * 1e-6\n PIX_MAS_x = PIX_RAD_x * MILIARCSECS_IN_A_RAD\n PIX_MAS_y = PIX_RAD_y * MILIARCSECS_IN_A_RAD\n\n # Define the ELT pupil mask. Note that we use a central obscuration too\n N = 2048\n x = np.linspace(-1, 1, N)\n xx, yy = np.meshgrid(x, x)\n\n # To get the anamorphic scaling we define the equation for an ellipse\n rho = np.sqrt((xx / RHO_APER_x) ** 2 + (yy / RHO_APER_y) ** 2)\n\n # (1) Propagate to the Image Slicer Focal plane\n elt_mask = (RHO_OBSC_x / RHO_APER_x < rho) & (rho < 1.0)\n pupil = elt_mask * np.exp(1j * elt_mask)\n image_electric = fftshift(fft2(pupil))\n\n if surface == 'IS':\n # print(\"IS\")\n # We are already at the Image Slicer, don't do anything else\n min_pix, max_pix = N // 2 - N_window // 2, N // 2 + N_window // 2\n final_psf = (np.abs(image_electric))**2\n final_psf /= np.max(final_psf)\n crop_psf = final_psf[min_pix:max_pix, min_pix:max_pix]\n\n elif surface == 'DET':\n # print(\"DET\")\n # (1.1) Add slicer effect by masking\n # We mask the PSF covering a band of size 1x SPAXEL, depending on the scale\n # If we have 4x4 mas, then we cover a band of 4 mas over the PSF\n x_min, x_max = -N/2 * PIX_MAS_x, N/2 * PIX_MAS_x\n y_min, y_max = -N/2 * PIX_MAS_y, N/2 * PIX_MAS_y\n x_slice = np.linspace(x_min, x_max, N, endpoint=True)\n y_slice = np.linspace(y_min, y_max, N, endpoint=True)\n x_grid, y_grid = np.meshgrid(x_slice, y_slice)\n slicer_mask = np.abs(y_grid) < scale_mas / 2\n\n # ## Show the PSF both in [mas] space where it should be circular and in [pixel] space where it should be anamorphic\n # fig, ax = plt.subplots(1, 1)\n # img1 = ax.imshow((np.abs(image_electric))**2, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # # plt.colorbar(img1, ax=ax)\n # ax.set_title(r'Airy Pattern | %.1f mas scale | Wavelength: %.3f $\\mu$m' % (scale_mas, wavelength))\n # ax.set_xlabel(r'X [mas]')\n # ax.set_ylabel(r'Y [mas]')\n # ax.set_xlim([-10, 10])\n # ax.set_ylim([-10, 10])\n #\n # fig, ax = plt.subplots(1, 1)\n # img1 = ax.imshow((np.abs(image_electric))**2, extent=[-N/2, N/2, -N/2, N/2], cmap='bwr')\n # ax.set_title(r'Airy Pattern | %.1f mas scale | Wavelength: %.3f $\\mu$m' % (scale_mas, wavelength))\n # ax.set_xlabel(r'Pixels [ ]')\n # ax.set_ylabel(r'Pixels [ ]')\n # ax.set_xlim([-100, 100])\n # ax.set_ylim([-100, 100])\n\n # plt.show()\n\n # (2) Propagate the masked electric field to Pupil Plane\n pup_grating = ifft2(fftshift(slicer_mask * image_electric))\n # (2.1) Add pupil mask, this time without the central obscuration\n aperture_mask = rho < 1.0\n\n # (3) Propagate back to Focal Plane\n final_focal = fftshift(fft2(aperture_mask * pup_grating))\n final_psf = (np.abs(final_focal))**2\n final_psf /= np.max(final_psf)\n\n # (4) 
Crop the PSF to fit to the necessary window to ease the convolutions\n min_pix, max_pix = N//2 - N_window//2, N//2 + N_window//2\n crop_psf = final_psf[min_pix:max_pix, min_pix:max_pix]\n\n # If we want to show the plots for Documentation\n\n # fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n # psf_airy = (np.abs(image_electric))**2\n # img1 = ax1.imshow(psf_airy, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # ax1.axhline(y=scale_mas/2, linestyle='--', color='black')\n # ax1.axhline(y=-scale_mas/2, linestyle='--', color='black')\n # ax1.set_xlabel(r'X [mas]')\n # ax1.set_ylabel(r'Y [mas]')\n # ax1.set_xlim([-15, 15])\n # ax1.set_ylim([-15, 15])\n # ax1.set_title(r'Airy Pattern | Slicer Mask %.1f mas' % scale_mas)\n #\n # img2 = ax2.imshow(aperture_mask * (np.abs(pup_grating)**2), extent=[-1, 1, -1, 1], cmap='bwr')\n # ax2.set_title(r'Pupil Plane | Aperture Mask')\n # ax2.set_xlim([-0.25, 0.25])\n # ax2.set_ylim([-0.25, 0.25])\n #\n # img3 = ax3.imshow(final_psf, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # ax3.set_xlabel(r'X [mas]')\n # ax3.set_ylabel(r'Y [mas]')\n # ax3.set_xlim([-15, 15])\n # ax3.set_ylim([-15, 15])\n # ax3.set_title(r'Diffraction Effects')\n # plt.show()\n\n return crop_psf", "def sum_images(filelist):\n\n nfiles = np.size(filelist)\n\n print(\"Summing together {} files\".format(nfiles))\n\n ims = []\n\n for fname in filelist:\n hdu = f.open(fname)\n ims.append(hdu[0].data)\n\n ims = np.array(ims)\n\n sum_im = np.nansum(ims, axis=0)\n hdu[0].data = sum_im\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Summed up the following images:\"\n\n for fname in filelist:\n hdu[0].header['HISTORY'] = fname\n\n hdu[0].header['HISTORY'] = \"######################\"\n\n outname = filelist[-1].split('.')[0]+'_summed.fits'\n\n print(\"Writing out final file to {}\".format(outname))\n\n hdu.writeto(outname, overwrite=True)", "def get_spectral_response(wavelengths_arr, stack):\n\n resolution = 1\n for i, re_index in enumerate(stack.index):\n step_size = stack.thickness.sum() / 2 ** 17\n z0 = np.linspace(0, stack.thickness[i], round(stack.thickness[i] / step_size))\n resolution += len(z0)\n\n electric_tot_te = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n electric_tot_tm = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n reflectivity_te = np.zeros(len(wavelengths_arr), dtype=complex)\n reflectivity_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_te = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n index_tot = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n theta_tot = np.zeros([len(stack.index) + 1, wavelengths_arr.size], dtype=complex)\n\n a0 = 1 # Initial amplitude of electric field going toward the coating\n b0 = 0 # Initial amplitude of electric field going back the coating (if 0, no counter propagating light)\n theta = 0 # angle of the beam with respect to the coating\n\n for i, lam in enumerate(wavelengths_arr):\n # print a progressbar in the console\n print_progressbar(i, len(wavelengths_arr), suffix = '%')\n electric_tot_te[:, i], electric_tot_tm[:, i], reflectivity_te[i], reflectivity_tm[i], transmission_te[i], \\\n transmission_tm[i], index_tot, L, theta_tot = transfer_matrix_method(stack, a0, b0, lam, theta)\n return reflectivity_te, transmission_te, 1 - (reflectivity_te + transmission_te)", "def 
_image_average(self, images):\n image_data = [\n image.normalize().data for image in images\n # Workaround: skip partial volcano images at the edges\n if image.data.shape[0] == image.data.shape[1]\n ]\n return np.rint(\n np.mean(image_data, axis=0)\n ).astype(np.uint8)", "def als2reg(image, number): \n\n root = image[:-5]\n inf = open(\"%s.als.%i\" % (root, number))\n lines = inf.readlines()\n \n stars = []\n i=0\n while i < len(lines):\n \n line = lines[i]\n if re.match(\"#\", line):\n i+=1\n else:\n temp = line.strip().split()\n newstar = Star(name=\"IRAF-%i\" % int(temp[0]), xval=float(temp[1]),\n yval=float(temp[2]), mag=float(temp[3]),\n magu=float(temp[4]))\n stars.append(newstar)\n i+=2\n\n newstars=Starlist(stars=stars)\n return newstars", "def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1", "def get_airglow_spectra(self):\n\n self.AIRGLOW_DIR = os.getcwd()+'/AirglowSpectra/cosby/'\n AF = glob.glob(self.AIRGLOW_DIR+'/*.txt')\n AL = []\n for file in AF:\n data = pd.read_csv(file, delim_whitespace=True)\n d = data.to_records(index=False)\n AL.append(np.array(d))\n self.AirglowLines = np.hstack(AL)", "def get_spectra_pixell(alm1, alm2=None, spectra=None):\n \n if spectra is None:\n if alm2 is None:\n cls = curvedsky.alm2cl(alm1)\n else:\n cls = curvedsky.alm2cl(alm1, alm2)\n l = np.arange(len(cls))\n return l, cls\n \n \n cls = curvedsky.alm2cl(alm1[:,None], alm2[None,:])\n l = np.arange(len(cls[0,0]))\n cl_dict = {}\n for i, l1 in enumerate([\"T\",\"E\",\"B\"]):\n for j, l2 in enumerate([\"T\",\"E\",\"B\"]):\n cl_dict[l1+l2] = cls[i,j]\n \n return(l, cl_dict)", 
"def subamost_media(imagem, r):\n\n lx, ly = imagem.shape\n img_sub_mean = np.zeros([int(lx/r), int(ly/r)])\n img_aux = np.zeros([r, r])\n for i in np.arange(r-1, lx, r):\n for j in np.arange(r-1, ly, r):\n img_aux = imagem[i - r/2:i + r/2, j - r/2:j + r/2]\n img_sub_mean[int(i/r), int(j/r)] = img_aux.mean()\n\n return img_sub_mean", "def make_spectra(directory,frame):\n oober = st.short_oober(directory, frame=frame)\n #st.MakeVelocitySpectra(oober,frame)\n #st.MakeAccelSpectra(oober,frame)\n #st.MakeMagneticSpectra(oober,frame)\n st.MakeDensitySpectra(oober,frame)", "def get_average(self, samples=50):\n first = self.layers[0].load_image()\n res = np.zeros(first.shape, dtype=float)\n intervals = len(self.layers)/samples\n for l in self.layers[::int(intervals)]:\n img = l.load_image().astype(float)\n res += img\n l.image = None\n return samples**-1*res", "def extract(image):\n # calculate fft\n spectrum = np.fft.fft2(image)\n fshift = np.fft.fftshift(spectrum) # to make the magnitude graph with the lower frequency in the middle\n\n # calculate phase and magnitude\n magnitude = np.abs(fshift)\n phase = np.angle(fshift)\n\n return magnitude, phase", "def rot_mosaic(source_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Processing/',\r\n output_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Final/',\r\n file_pattern='IID201905*.jpg', sub_dir=False, k=1, replace=False): \r\n \r\n \r\n if sub_dir:\r\n mosaics = []\r\n for root, dirnames, filenames in os.walk(source_dir):\r\n for filename in fnmatch.filter(filenames, file_pattern):\r\n mosaics.append(os.path.join(root, filename))\r\n else:\r\n mosaics = glob.glob(source_dir + file_pattern) \r\n \r\n g = 0\r\n r = 0\r\n s = 0\r\n for m in mosaics:\r\n f = output_dir + os.path.basename(m)\r\n if not os.path.exists(f):\r\n img = improc.imops.imio.imread(m)\r\n img = np.rot90(img, k=k) \r\n improc.imops.imio.imsave(f, img)\r\n print('generated: %s' % f)\r\n print('')\r\n g+=1\r\n elif replace:\r\n img = improc.imops.imio.imread(m)\r\n img = np.rot90(img, k=k)\r\n improc.imops.imio.imsave(f, img)\r\n print('replaced: %s' % f)\r\n print('')\r\n r+=1\r\n else:\r\n print('skipping: %s' % m)\r\n print('')\r\n s+=1\r\n\r\n print('generated total of %i files' % g)\r\n print('replaced total of %i files' % r)\r\n print('skipped total of %i files' % s)", "def masterFlat(flat_list, master_dark_fname, normalize = 'median', local_sig_bad_pix = 3, \\\n global_sig_bad_pix = 9, local_box_size = 11, hotp_map_fname = None, verbose=False,\n output_dir = None,min_flux=1000):\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n\n if verbose:\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open all files into a 3D array\n #foo = np.empty((dark_shape[0],dark_shape[1],len(flat_list)))\n foo = []\n\n #Open first flat file to check exposure time and filter\n first_flat_hdu = f.open(flat_list[0])\n flat_exp_time = first_flat_hdu[0].header['EXPTIME']\n\n\n\n if dark_exp_time != flat_exp_time:\n print(\"The master dark file doesn't have the same exposure time as the flats. 
We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = flat_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #We've already read it, so we'll stick it in foo\n\n print(\"Combining flat files\")\n for i in range(0,len(flat_list)):\n try: \n #subtract dark for each file, then normalize by mode\n hdu = f.open(flat_list[i],ignore_missing_end=True)\n d_sub = hdu[0].data - factor*master_dark\n if np.nanmedian(d_sub) < min_flux:\n #print(\"Skipping file {}, because its flux is lower than {}\".format(flat_list[i],min_flux))\n continue\n #normalize\n if normalize == 'mode':\n d_sub = d_sub/mode(d_sub, axis = None, nan_policy = 'omit')\n elif normalize == 'median':\n d_sub = d_sub/np.nanmedian(d_sub)\n #foo[:,:,i] = d_sub\n foo.append(d_sub)\n except:\n print(\"Some error. Skipping file {}\".format(i)) \n #Median combine frames\n flat = np.median(foo, axis = 0)\n\n #Filter bad pixels\n #bad_px = sigma_clip(flat, sigma = sig_bad_pix) #old and bad\n ###Major update here: do sigma clipping on the pix-to-pix flat with the large scale vignette removed\n ###Also add local sigma clipping\n def stddevFilter(img, box_size):\n \"\"\" from\n https://stackoverflow.com/questions/28931265/calculating-variance-of-an-image-python-efficiently/36266187#36266187\n This function compute the standard deviation of an image in a\n moving box of a given size. The pixel i,j of the output is the\n standard deviation of the pixel value in the box_size x box_size box\n around the i,j pixel in the original image.\n \"\"\"\n wmean, wsqrmean = (cv2.boxFilter(x, -1, (box_size, box_size), \\\n borderType=cv2.BORDER_REFLECT) for x in (img, img*img))\n return np.sqrt(wsqrmean - wmean*wmean)\n\n #median flat\n median_flat = median_filter(flat, local_box_size) #arbitrary size, shouldn't matter as long as it's big enough\n #standard deviation image\n stddev_im = stddevFilter(flat, local_box_size)\n\n #Local clipping\n local_bad_pix = np.abs(median_flat - flat) > local_sig_bad_pix*stddev_im\n\n #Global clipping here to reject awful pixels and dust, bad columns, etc\n pix_to_pix = flat/median_flat\n global_bad_px = sigma_clip(pix_to_pix, sigma = global_sig_bad_pix).mask #9 seems to work best\n\n #also set all 0 and negative pixels in flat as bad\n non_positive = flat <= 0\n\n #logic combine\n bad_px = np.logical_or(global_bad_px, local_bad_pix)\n\n #also add non_positive pixels\n bad_px = np.logical_or(bad_px, non_positive)\n\n #Normalize good pixel values\n if normalize == 'median':\n norm_flat = flat/np.nanmedian(flat[~bad_px])\n elif normalize == 'mode':\n norm_flat = flat/mode(flat, axis = None, nan_policy = 'omit')\n #Stick it back in the last hdu\n hdu[0].data = norm_flat\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created master flat by median combining the following:\"\n for i in range(len(flat_list)):\n hdu[0].header['HISTORY'] = flat_list[i]\n hdu[0].header['HISTORY'] = \"Normalized to the median of the master flat\"\n hdu[0].header['HISTORY'] = \"Performed bad pixel local and global sigma clipping with {}, {}sigmas\".format(local_sig_bad_pix, global_sig_bad_pix)\n hdu[0].header['HISTORY'] = \"############################\"\n\n #Parse the last fileanme\n if output_dir is not None:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n flat_outname = flat_outname.rsplit('/',1)[-1]\n 
flat_outname = output_dir+flat_outname\n else:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n\n #Write the fits file\n if verbose:\n print((\"Writing master flat to {}\".format(flat_outname)))\n hdu.writeto(flat_outname, overwrite=True)\n\n #If there's already a hot pixel map then we'll add to it.\n if hotp_map_fname != None:\n #read in the existing bp map\n #hdu = f.open(hotp_map_fname)\n #hdu[0].data += np.array(bad_px.mask, dtype=float)\n #hdu[0].data = np.logical_or(hdu[0].data.astype(bool), bad_px) #use logical or to combine bad pixel maps\n #bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n print(\"Will deal with hot pixel map from dark frames in the calibrate function\")\n\n #else:\n #Parse the last fileanme\n if output_dir is not None:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n bp_outname = bp_outname.rsplit('/',1)[-1]\n bp_outname = output_dir+bp_outname\n else:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n ##### Now write the bad pixel map\n hdu[0].data = bad_px.astype(int)#np.array(bad_px.mask, dtype=float)\n #Parse the last fileanme\n # bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n #Add history keywords\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created bad pixel map by sigma clipping on pixel-to-pixel flat{}\".format(flat_outname)\n hdu[0].header['HISTORY'] = \"Bad pixel cutoffs: local sigma = {} and global sigma = {} for clipping\".format(local_sig_bad_pix, global_sig_bad_pix)\n #hdu[0].header['HISTORY'] = \"Bad pixel cutoff of {}sigma\".format(sig_bad_pix)\n hdu[0].header['HISTORY'] = \"A pixel value of 1 indicates a bad pixel\"\n hdu[0].header['HISTORY'] = \"############################\"\n\n if verbose:\n print((\"Writing bad pixel map to {}\".format(bp_outname)))\n #Write the fits file\n hdu.writeto(bp_outname, overwrite=True)\n\n return flat_outname, bp_outname", "def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % 
(avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')", "def DARP2016_MicArray():\n\n M = 36 # number of mics\n array_height = -0.49 # [m] (ref. 
to airfoil height at z=0)\n\n # mic coordinates (corrected for DARP2016 configuration)\n XYZ_array = np.array([[0., 0.025, 0.08477, 0.12044, 0.18311, 0.19394,\n 0.01559, 0.08549, 0.16173, 0.19659, 0.24426, -0.00556,\n 0.02184, 0.08124, 0.06203, 0.11065, -0.02252, -0.05825,\n -0.06043, -0.11924, -0.10628, -0.02252, -0.09449, -0.15659,\n -0.21072, -0.24318, -0.00556, -0.05957, -0.13484, -0.14352,\n -0.19696, 0.01559, 0.02021, -0.01155, 0.03174, -0.00242],\n [-0., -0., 0.04175, 0.11082, 0.10542, 0.15776,\n -0.01955, -0.04024, -0.02507, -0.07743, -0.05327, -0.02437,\n -0.09193, -0.14208, -0.20198, -0.22418, -0.01085, -0.0744,\n -0.1521, -0.17443, -0.22628, 0.01085, -0.00084, -0.04759,\n -0.01553, -0.05799, 0.02437, 0.07335, 0.09276, 0.15506,\n 0.15397, 0.01955, 0.09231, 0.16326, 0.20889, 0.24999],\n array_height*np.ones(M)])\n\n # calibration factors\n array_cal = np.array([73.92182641429085, 96.84446743391487, 85.48777846463159,\n 85.24410968090712, 83.63917149322562, 68.94090765134432,\n 79.2385037527723, 112.77357210746612, 84.8483307868491,\n 87.18956628936178, 97.75046920293282, 89.2829545690508,\n 79.51644155562396, 90.39403884030057, 80.71754629014218,\n 89.4418210091059, 98.33634233056068, 79.2212022850229,\n 91.25543447201031, 89.55040012572815, 85.77495667666254,\n 82.74418222820202, 84.63061055646973, 77.01568014644964,\n 95.52764533324982, 92.16734812591154, 95.27123074600838,\n 87.93335310521428, 96.65066131188675, 93.58564782091074,\n 78.1446818728945, 101.3047738767648, 83.68569643491034,\n 84.7981031520437, 94.40796508430756, 83.52266614867919])\n\n return XYZ_array, array_cal", "def masterDark(dark_list, bad_pix_method = 'MAD', sig_hot_pix = 5, output_dir = None):\n #Open all files into a 3D array\n print(\"Creating a master dark\")\n dark_cube = np.empty((len(dark_list),2048,2048))\n num=0\n for i in range(len(dark_list)):\n try:\n hdu = f.open(dark_list[i])\n dark_cube[i,:,:] = hdu[0].data\n hdu.close()\n num += 1\n except:\n print('File Error; moving on to next file.')\n dark_cube[i,:,:] = [([0]*2048)]*2048\n continue\n\n if num == 0:\n return None,None\n #Create the master dark\n master_dark = np.median(dark_cube, axis = 0)\n\n if bad_pix_method == 'sigma_clipping':\n hot_px = sigma_clip(master_dark, sigma = sig_hot_pix)\n elif bad_pix_method == 'MAD':\n MAD = np.median(np.abs(dark_cube - master_dark ), axis = 0) #compute MAD\n hot_px = sigma_clip(MAD, sigma = sig_hot_pix)\n elif bad_pix_method == 'standard_deviation':\n SD = np.std(dark_cube, axis = 0)\n hot_px = sigma_clip(SD, sigma = sig_hot_pix)\n else:\n print('%s is in valid, use MAD instead'%bad_pix_method)\n MAD = np.median(np.abs(dark_cube - master_dark ), axis = 0) #compute MAD\n hot_px = sigma_clip(MAD, sigma = sig_hot_pix)\n\n\n #zero_px = master_dark == 0.\n\n bad_px = hot_px.mask #| zero_px\n\n #Stick it back in the last hdu\n hdu[0].data = master_dark\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created master dark by median combining the following frames\"\n for i in range(len(dark_list)):\n hdu[0].header['HISTORY'] = dark_list[i]\n hdu[0].header['HISTORY'] = \"############################\"\n\n #Parse the last fileanme\n if output_dir is not None:\n dark_outname = dark_list[-1].rsplit('.',1)[0]+\"_master_dark.fits\"\n dark_outname = dark_outname.rsplit(\"/\",1)[-1]\n dark_outname = 
output_dir+dark_outname\n else:\n dark_outname = dark_list[-1].rsplit('.',1)[0]+\"_master_dark.fits\"\n\n print((\"Writing master dark to {}\".format(dark_outname)))\n #Write the fits file\n hdu.writeto(dark_outname, overwrite=True)\n\n #Stick it back in the last hdu\n #hdu[0].data = np.array(bad_px, dtype=float)*2\n hdu[0].data = np.array(bad_px, dtype=float) #this is for new version, separate maps from dark and flat\n\n #Add history keywords\n #Add history keywords\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created hot pixel map by {}: {}\".format(bad_pix_method, dark_outname)\n hdu[0].header['HISTORY'] = \"Bad pixel cutoff of {}sigma\".format(sig_hot_pix)\n hdu[0].header['HISTORY'] = \"A pixel value of 1 indicates a hot pixel\"\n hdu[0].header['HISTORY'] = \"############################\"\n\n #Parse the last filename\n if output_dir is not None:\n bp_outname = dark_list[-1].rsplit('.',1)[0]+\"_hp_map.fits\"\n bp_outname = bp_outname.rsplit(\"/\",1)[-1]\n bp_outname = output_dir+bp_outname\n else:\n bp_outname = dark_list[-1].rsplit('.',1)[0]+\"_hp_map.fits\" #hp map is from dark, as oppose to bp map from flat\n\n print((\"Writing master dark to {}\".format(bp_outname)))\n #Write the fits file\n hdu.writeto(bp_outname, overwrite=True)\n\n return dark_outname, bp_outname", "def desktop(path, calfile=None, transitspeed=3,\n soundspeed=None, absorption=None):\n # Get list of RAW files, and the calibration file\n rawfiles= np.sort(glob.glob(os.path.join(path, '*.raw'))) \n if rawfiles.size==0:\n raise Exception('No RAW files in directory %s' % path)\n \n # Preallocate variables and iterate through RAW files\n logname = dt.now().strftime('D%Y%m%d-T%H%M%S')\n preraw = None\n rawpile = None\n for rawfile in rawfiles:\n \n # Try to read, process and report\n try:\n \n # read RAW file\n raw = read.raw(rawfile, calfile=calfile, \n transitspeed=transitspeed,\n soundspeed=soundspeed, absorption=absorption,\n preraw=preraw) \n preraw = raw.copy()\n \n # if raw is continuous with preceeding data...\n if raw['continuous']:\n \n # pile up current raw in the rawpile...\n if rawpile is not None:\n rawpile = read.join(rawpile, raw) \n \n # or start a new rawpile if not created yet\n else:\n rawpile = raw.copy() \n \n # or start a new rawpile if raw is not continuous \n else:\n rawpile = raw.copy()\n prepro = None\n jdx = [0,0]\n \n # Process rawpile if vessels is moving...\n if rawpile['transect']>0:\n \n # Process rawpile if it's got at least 1 nmi...\n if rawpile['nm'][-1]-rawpile['nm'][0]>1:\n pro = process.ccamlr(rawpile, prepro=prepro, jdx=jdx)\n \n # Report results\n report.console(pro)\n report.log(pro, logname)\n \n prepro = rawpile \n jdx = process.next_jdx(pro)\n rawpile = None\n \n # or report it hasn't got 1 nmi yet\n else:\n logger.info('Processing pending: at least 1 nmi required')\n \n # or report the vessel is not moving, and reset parameters \n else:\n logger.info('Processing skipped: platform not in transit')\n rawpile = None\n prepro = None\n jdx = [0,0]\n \n # free up memory RAM\n if 'raw' in locals(): del raw\n if 'pro' in locals(): del pro\n gc.collect()\n \n # log error if process fails and reset rawpile \n except Exception:\n logger.error('Failed to process file', exc_info=True)\n rawpile = None", "def read_image_and_angle(root_path, data, camera, index):\n image_path = os.path.join(root_path,\n data[cameras[camera]].values[index].strip())\n image = plt.image.imread(image_path)\n angle = data.steering.values[index] + 
cameras_correction[camera]\n\n return image, angle", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def to_spectral_img(data):\n assert data.size(-1) == 2\n\n spectral_vol = torch.zeros([data.size(-2), data.size(-2), data.size(-2)])\n\n for i in range(data.size(-2)):\n kspc1 = torch.zeros(data.size())\n kspc1[:, i, :] = data[:, i, :]\n img1 = ifft2(kspc1)\n img1_abs = complex_abs(img1)\n\n spectral_vol[i, :, :] = img1_abs\n\n return spectral_vol", "def run_psavg_sims(bursttimefile):\n\n nfolder = [5,6,8,12]\n datadirs = [\"P20165/20165-01-01-000\", \"P20165/20165-01-01-001\", \"P20165/20165-01-01-002\",\n \"P10223/10223-01-03-01\", \"P10223/10223-01-03-010\" ]\n\n data_all, unbary_all, tstart_all, tend_all, t0_all, pcus_all, std1dir_all = [], [], [], [], [], [], []\n\n for d in datadirs:\n print(\"I am on directory %s\" %d)\n files = 
rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div8192*.asc\")\n if len(files) == 0:\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div-32768s*.asc\")\n if len(files) == 0:\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div8*.asc\")\n #print(\"File to use %s\" %files[0])\n data = rxte.RXTEData(times=None, channels=None, datafile=files[0], npcus=None, ra=None, dec=None, emid=None, emiddir=None, bary=True)\n\n len_datafile = len(files[0].split(\"/\")[-1])\n len_processed = len(files[0].split(\"/\")[-2])\n std1dir_all.append(files[0][:-(len_datafile+len_processed+1)])\n\n data_all.append(np.array([p.time for p in data.photons])+data.t0)\n unbary_all.append(np.array([p.unbary for p in data.photons])+data.t0)\n tstart_all.append(data.photons[0].unbary+data.t0)\n tend_all.append(data.photons[-1].unbary+data.t0)\n t0_all.append(data.t0)\n pcus_all.append(data.pcus)\n\n t0_sorted, tstart_sorted, tend_sorted, data_sorted, pcus_sorted, std1dir_sorted, unbary_sorted = \\\n zip(*sorted(zip(t0_all, tstart_all, tend_all, data_all, pcus_all, std1dir_all, unbary_all)))\n t0_sorted = np.array(t0_sorted)\n\n psno = [5,6,8,12]\n m_all = [30, 23, 23, 50]\n\n for n,m in zip(psno, m_all):\n psavg_all = sgr1900_results.make_randomly_sampled_periodograms(datadirs, bursttimefile, m, n=1000,\n save_step=100, fileroot=\"sgr1806_psavg%i\"%n,\n data_sorted=data_sorted, t0_sorted=t0_sorted,\n pcus_sorted=pcus_sorted, tend_sorted=tend_sorted,\n tstart_sorted=tstart_sorted,\n unbary_sorted=unbary_sorted)\n\n return", "def read_statistics(self):\n self.psdata=[]\n self.powerspectra=[]\n self.ds=[]\n self.dsigmasq=[]\n self.dsigma=[]\n self.bsdata=[]\n self.eqbispectra=[]\n self.fNLeq=[]\n\n for sub in range(self.Nsubs):\n self.psdata.append(np.load(self.datadir+self.filebase+\"_\"+str(sub)+\".npy\"))\n self.powerspectra.append(np.trim_zeros(self.psdata[-1][0][1:]))\n self.bsdata.append(np.load(self.datadir+self.fbbispec+\"_\"+str(sub)+\".npy\"))\n self.eqbispectra.append(self.bsdata[-1][0][1:len(self.powerspectra[-1])])\n\n self.ds.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[0])\n self.dsigmasq.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[1])\n self.dsigma = np.array([np.sqrt(dsq) for dsq in self.dsigmasq])\n\n self.klist=np.arange(1, len(self.powerspectra[-1]))*(2.*np.pi/self.Lsub)\n # subtract the mean ds\n self.ds = self.ds - np.mean(self.ds)\n self.fNLeq=np.mean(self.eqbispectra, axis=0)\n self.fNLeqsubs=np.mean(self.eqbispectra, axis=1)\n self.fNLeqds=[]\n for i in range(len(self.eqbispectra)):\n self.fNLeqds.append(np.array([self.ds[i]*self.eqbispectra[i][j] for j in range(45)]))", "def compare_averages_shell_pspec_dft():\n\n select_radius = 5. 
#degrees\n\n Nside=256\n Npix = 12 * Nside**2\n Omega = 4*np.pi/float(Npix)\n\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420/freqs - 1.\n\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n\n dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)\n variances = []\n means = []\n pks = []\n\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n\n steps = range(10,110,10)\n vmin,vmax = min(steps),max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins-5]))\n means.append(np.mean(pk[0:Nkbins-5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n\n ax0.axhline(y=dV*sig**2, color='k', lw=2.0)\n# ax0.legend()\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable,label=r'Number of snapshots', ax=ax0)\n ax0.set_ylabel(r\"P(k) [mK$^2$ Mpc$^{3}]$\")\n ax0.set_xlabel(r\"k [Mpc$^{-1}]$\")\n ax1.plot(steps, np.array(variances), label=\"Variance\")\n ax1.set_ylabel(r\"Variance(P(k)) [mK$^4$ Mpc$^{6}]$\")\n ax1.set_xlabel(u\"Number of 5° snapshots\")\n ax3.plot(steps, means, label=\"Mean\")\n ax3.set_ylabel(r\"Mean(P(k)) [mK$^2$ Mpc$^{3}]$\")\n ax3.set_xlabel(u\"Number of 5° snapshots\")\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto')#, norm=mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV*sig**2)))\n pl.show()", "def compute_metrics_on_directories_raw(dir_gt, dir_pred):\n\n lst_gt = sorted(glob(os.path.join(dir_gt, '*')), key=natural_order)\n lst_pred = sorted(glob(os.path.join(dir_pred, '*')), key=natural_order)\n\n res = []\n cardiac_phase = []\n file_names = []\n\n measure_names = ['Dice LV', 'Volume LV', 'Err LV(ml)',\n 'Dice RV', 'Volume RV', 'Err RV(ml)', 'Dice MYO', 'Volume MYO', 'Err MYO(ml)',\n 'Hausdorff LV', 'Hausdorff RV', 'Hausdorff Myo',\n 'ASSD LV', 'ASSD RV', 'ASSD Myo']\n\n res_mat = np.zeros((len(lst_gt), len(measure_names)))\n\n ind = 0\n for p_gt, p_pred in zip(lst_gt, lst_pred):\n if os.path.basename(p_gt) != os.path.basename(p_pred):\n raise ValueError(\"The two files don't have the same name\"\n \" {}, {}.\".format(os.path.basename(p_gt),\n os.path.basename(p_pred)))\n\n\n gt, _, header = load_nii(p_gt)\n pred, _, _ = load_nii(p_pred)\n zooms = header.get_zooms()\n res.append(metrics(gt, pred, zooms))\n cardiac_phase.append(os.path.basename(p_gt).split('.nii.gz')[0].split('_')[-1])\n\n file_names.append(os.path.basename(p_pred))\n\n res_mat[ind, :9] = metrics(gt, pred, zooms)\n\n for ii, struc in enumerate([3,1,2]):\n\n gt_binary = (gt == struc) * 1\n pred_binary = (pred == struc) * 1\n\n res_mat[ind, 9+ii] = hd(gt_binary, pred_binary, voxelspacing=zooms, connectivity=1)\n res_mat[ind, 12+ii] = assd(pred_binary, gt_binary, voxelspacing=zooms, connectivity=1)\n\n ind += 1\n\n return res_mat, cardiac_phase, measure_names, file_names", "def hotaverage( names):\n rs = radioastronomy.Spectrum() # create input and average structures\n nhot = 0\n\n avenames = names # create a list of files to average\n\n # for all input files\n for filename in 
names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'HOT': # speed up by only looking at hot load files\n continue\n \n rs.read_spec_ast(filename)\n\n if rs.telel > 0: # only working with hot load, skip elevation > 0.\n continue\n\n avenames[nhot] = filename\n nhot = nhot + 1\n # end of for all files loop\n\n nhot, hot = average( avenames[0:nhot]) # now use generic program for averages\n if nhot < 1:\n print 'No hot load files; can not calibrate!'\n exit()\n\n return nhot, hot", "def system_7(seed_img_file, in_dir, out_dir, threshold, num_frames, num_prev_frames, blend_coef, blur=(3,3), as_numeric=True, stretched=True):\n pass", "def calc_psd(self):\n psd2d = np.array(self.calc_psd2d())\n\n print(\"Azimuthally averaging 2D power spectral density ... \",\n end=\"\", flush=True)\n dim = self.shape[0]\n dim_half = (dim+1) // 2\n # NOTE:\n # The zero-frequency component is shifted to position of index\n # (0-based): (ceil((n-1) / 2), ceil((m-1) / 2))\n px = np.arange(dim_half-dim, dim_half)\n x, y = np.meshgrid(px, px)\n rho = np.sqrt(x**2 + y**2)\n\n radii = self.radii\n nr = len(radii)\n if nr > 100:\n print(\"\\n ... %d data points, may take a while ... \" % nr,\n end=\"\", flush=True)\n else:\n print(\" %d data points ... \" % nr, end=\"\", flush=True)\n psd1d = np.zeros(shape=(nr, 4))\n psd1d[:, 0] = self.frequencies\n\n for i, r in enumerate(radii):\n if (i+1) % 100 == 0:\n percent = 100 * (i+1) / nr\n print(\"%.1f%% ... \" % percent, end=\"\", flush=True)\n ii, jj = (rho <= r).nonzero()\n rho[ii, jj] = np.inf\n cells = psd2d[ii, jj]\n psd1d[i, 3] = len(cells)\n if self.meanstd:\n psd1d[i, 1] = np.mean(cells)\n psd1d[i, 2] = np.std(cells)\n else:\n median = np.median(cells)\n mad = np.median(np.abs(cells - median))\n psd1d[i, 1] = median\n psd1d[i, 2] = mad * 1.4826\n print(\"DONE\", flush=True)\n\n self.psd1d = psd1d\n return psd1d", "def calculate_dark_current(image, i, int_time):\n dark_data_dir = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Integration_Sweep\\Dark'\n data_path_name_split = image.split('_')\n #print(data_path_name_split)\n all_int_files = [each for each in os.listdir(dark_data_dir) \\\n if each.endswith('_'+data_path_name_split[-1])] \n print(all_int_files)\n \n dark_data_file = os.path.join(dark_data_dir, all_int_files[0])\n IDL_variable = readsav(dark_data_file) \n all_full_frame = IDL_variable.q \n quad = all_full_frame[:, i, :, :]\n active_quad = np.mean(quad[:, 4:1028, 10:1034], axis=0) \n tsoc = np.mean(quad[:, 4:1028, 1034:1056], axis=0)\n bias_subtracted_quad = perform_bias_subtraction_ave(active_quad, tsoc)\n smear_subtracted_quad, smear_signal = perform_smear_subtraction(bias_subtracted_quad[10:1000, :], int_time)\n return smear_subtracted_quad", "def current_average_luma(camera):\n camera.capture('/home/pi/Desktop/image1.jpg')#camera take picture\n img = Image.open(\"/home/pi/Desktop/image1.jpg\") #opens image\n \n luma=0 #sum of the lumenance of each pixels\n pixels = img.width*img.height #number of pixels\n \n for x in range(img.width):\n for y in range(img.height):\n (r, g, b) = img.getpixel((x,y))#get colour touple \n luma += (0.2126*r + 0.7152*g + 0.0722*b) #calculate luma of RGB data, then add to total\n #END for\n #END for\n \n img.close()#ensure to properly close 
the image\n return luma/pixels #return average of all pixels", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def rotate_images(data_folder, rots_per_pic):\n\n\tprint \"Rotating images...\"\n\n\t#search for images in folder iteratively\n\told_paths = []\n\tfor folder, subs, files in os.walk(data_folder):\n\t\tfor filename in files:\n\t\t\tif filename.endswith('.png') or filename.endswith('.jpg'):\n\t\t\t\told_paths.append(os.path.join(folder, filename))\n\t#sorts the paths obtained\n\told_paths.sort()\n\n\told_paths_with_sums = {}\n\n\tfor filename in old_paths:\n\t\told_paths_with_sums[filename] = 0\n\n\t#counts how many times the images were already processed \n\tnew_paths = []\n\tall_files_sum = 0\n\talready_processed_sum = 0\n\tfor filename in old_paths:\n\t\tif \"processed\" not in filename:\n\t\t\tall_files_sum = all_files_sum + 1\n\t\t\tnew_paths.append(filename)\n\t\t\tprint('File found:')\n\t\t\tprint filename\n\t\telse:\n\t\t\talready_processed_sum = already_processed_sum + 1\n\t\t\tmatching = [s for s in new_paths if ((filename.partition(\"_processed_\")[0]+\".png\")==s or (filename.partition(\"_processed_\")[0]+\".jpg\")==s)]\n\t\t\tfor i in matching:\n\t\t\t\told_paths_with_sums[i] = old_paths_with_sums[i] + 1\n\t\t\t\tif old_paths_with_sums[i] >= rots_per_pic:\n\t\t\t\t\tnew_paths.remove(i)\n\t\t\t\t\tprint('File already processed '+str(old_paths_with_sums[i])+' time(s):')\n\t\t\t\t\tprint(i)\n\t\t\t\telse:\n\t\t\t\t\tprint('File processed '+str(old_paths_with_sums[i])+' time(s):')\n\t\t\t\t\tprint(i)\n\n\tprocessed_sum = 0\n\ttoo_big_angles_sum = 0\n\tno_desc_found_sum = 0\n\tmarkers_out_of_mesh = 0\n\n\tfor current_path in new_paths:\n\t\t#rotates image as many times as needed to achieve the desired number of rotations\n\t\tfor i in range(int(rots_per_pic) - old_paths_with_sums[current_path]):\n\t\t\tpath = current_path\n\t\t\t\n\t\t\t#loads files generated by Zface if they exist and are not empty\n\t\t\tif (os.path.isfile(path+'.mesh3D') and\n\t\t\t\tos.path.isfile(path+'.mesh2D') and\n\t\t\t\tos.path.isfile(path+'.ctrl2D') and\n\t\t\t\tos.path.isfile(path+'.pars') and\n\t\t\t\tos.stat(path+'.mesh3D').st_size != 0 and\n\t\t\t\tos.stat(path+'.mesh2D').st_size != 0 and\n\t\t\t\tos.stat(path+'.ctrl2D').st_size != 0 and\n\t\t\t\tos.stat(path+'.pars').st_size != 0):\n\t\t\t\tsrc3 = np.loadtxt(path+'.mesh3D')\n\t\t\t\tsrc2 = np.loadtxt(path+'.mesh2D')\n\t\t\t\tctrl2 = np.loadtxt(path+'.ctrl2D')\n\t\t\t\tscale = np.loadtxt(path+'.pars')[0]\n\t\t\t\ttranslx = np.loadtxt(path+'.pars')[1]\n\t\t\t\ttransly = np.loadtxt(path+'.pars')[2]\n\t\t\t\tpitch = np.loadtxt(path+'.pars')[3]\n\t\t\t\tyaw = np.loadtxt(path+'.pars')[4]\n\t\t\t\troll = np.loadtxt(path+'.pars')[5]\n\n\t\t\t\t#tests wether or not initial rotation is too large\n\t\t\t\tif (abs(yaw)<radians(30) and abs(pitch)<radians(15)):\n\n\t\t\t\t\timage = data.load(path)\n\t\t\t\t\trows, cols = image.shape[0], image.shape[1]\n\n\t\t\t\t\tx = src3[:,0]\n\t\t\t\t\ty = src3[:,1]\n\t\t\t\t\tz = src3[:,2]\n\n\t\t\t\t\t#transform 3D mesh from normalized space and rotation to actual space and rotation\n\t\t\t\t\tx = x*cos(roll)+y*-sin(roll)\n\t\t\t\t\ty = x*sin(roll)+y*cos(roll)\n\t\t\t\t\tz = z\n\n\t\t\t\t\tx = x*cos(yaw)+z*sin(yaw)\n\t\t\t\t\ty = 
y\n\t\t\t\t\tz = x*-sin(yaw)+z*cos(yaw)\n\n\t\t\t\t\tx = x\n\t\t\t\t\ty = y*cos(pitch)+z*-sin(pitch)\n\t\t\t\t\tz = y*sin(pitch)+z*cos(pitch)\n\n\t\t\t\t\tx = x*scale+translx\n\t\t\t\t\ty = y*scale+transly\n\n\t\t\t\t\t#ortographically projects the 3D mesh to 2D (this will be our source for the Piecewise Affine Transform)\n\t\t\t\t\tsrc_cols = x\n\t\t\t\t\tsrc_rows = y\n\n\t\t\t\t\tsrc_rows, src_cols = np.meshgrid(src_rows, src_cols, sparse=True)\n\t\t\t\t\tsrc = np.dstack([src_cols.flat, src_rows.flat])[0]\n\n\t\t\t\t\t#transforms it back to normalized space\n\t\t\t\t\tx = (x-translx)/scale\n\t\t\t\t\ty = (y-transly)/scale\n\n\t\t\t\t\t#rotates it back to 0 rotation\n\t\t\t\t\tyaw = -yaw\n\t\t\t\t\tpitch = -pitch\n\t\t\t\t\troll = -roll\n\n\t\t\t\t\t#adds random rotation\n\t\t\t\t\treal_yaw = radians(random.uniform(-30, 30))\n\t\t\t\t\treal_pitch = radians(random.uniform(-15, 15))\n\t\t\t\t\treal_roll = 0\n\n\t\t\t\t\tyaw = yaw + real_yaw\n\t\t\t\t\tpitch = pitch + real_pitch\n\t\t\t\t\troll = roll + real_roll\n\n\t\t\t\t\tx = x*cos(roll)+y*-sin(roll)\n\t\t\t\t\ty = x*sin(roll)+y*cos(roll)\n\t\t\t\t\tz = z\n\n\t\t\t\t\tx = x*cos(yaw)+z*sin(yaw)\n\t\t\t\t\ty = y\n\t\t\t\t\tz = x*-sin(yaw)+z*cos(yaw)\n\n\t\t\t\t\tx = x\n\t\t\t\t\ty = y*cos(pitch)+z*-sin(pitch)\n\t\t\t\t\tz = y*sin(pitch)+z*cos(pitch)\n\n\t\t\t\t\t#transforms it back to real space\n\t\t\t\t\tx = x*scale+translx\n\t\t\t\t\ty = y*scale+transly\n\n\t\t\t\t\t#orthographic projection of new coordinates will be the destination for PiecewiseAffineTransform\n\t\t\t\t\tdst_cols = x\n\t\t\t\t\tdst_rows = y\n\t\t\t\t\tdst = np.vstack([dst_cols, dst_rows]).T\n\n\t\t\t\t\tout_rows = rows\n\t\t\t\t\tout_cols = cols\n\n\t\t\t\t\t#looks for triangles formed by Delaunay triangularion, extracts the ones associated with each facial keypoint marker\n\t\t\t\t\ttform = PiecewiseAffineTransform()\n\t\t\t\t\tsrc_triangles, dst_triangles = tform.estimate(src[:,0:2], dst)\n\t\t\t\t\tctrl2_transforms = []\n\t\t\t\t\tfor current_ctrl2 in ctrl2:\n\t\t\t\t\t\tfor i in range(len(src_triangles)):\n\t\t\t\t\t\t\ttriangle = polygon.Path(src_triangles[i])\n\t\t\t\t\t\t\tif triangle.contains_point(current_ctrl2):\n\t\t\t\t\t\t\t\tctrl2_transforms.append(tform.affines[i])\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif len(ctrl2_transforms)!=49:\n\t\t\t\t\t\tmarkers_out_of_mesh = markers_out_of_mesh + 1\n\t\t\t\t\t\tprint \"didn't process image, because can't find all shape parameters:\"\n\t\t\t\t\t\tprint path\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tout_ctrl2 = []\n\t\t\t\t\tfor i in range(len(ctrl2_transforms)):\n\t\t\t\t\t\t\t#performs transformation on marker\n\t\t\t\t\t\t\tout_ctrl2.append(ctrl2_transforms[i](ctrl2[i]))\n\t\t\t\t\tout_ctrl2 = np.transpose((np.transpose(out_ctrl2)[0],np.transpose(out_ctrl2)[1]))\n\t\t\t\t\tout_ctrl2 = np.squeeze(out_ctrl2)\n\n\t\t\t\t\t#transforms image to the new surface triangle by triangle using Delaunay triangulation, then interpolation to smooth it out\n\t\t\t\t\ttform = PiecewiseAffineTransform()\n\t\t\t\t\ttform.estimate(dst, src[:,0:2])\n\t\t\t\t\tout_image = warp(image, tform, output_shape=(out_rows, out_cols))\n\n\t\t\t\t\tout_path = path[:-4]+'_processed'+'_yaw_'+str(real_yaw)+'_pitch_'+str(real_pitch)+'_roll_'+str(real_roll)+path[-4:]\n\n\t\t\t\t\t#saves image and marker points\n\t\t\t\t\timsave(out_path, out_image)\n\n\t\t\t\t\tnp.savetxt(out_path+'_0.txt', out_ctrl2)\n\n\t\t\t\t\tprocessed_sum = processed_sum + 1\n\t\t\t\t\tprint(str(processed_sum)+'. 
file processed:')\n\t\t\t\t\tprint(path)\n\t\t\t\telse:\n\t\t\t\t\ttoo_big_angles_sum = too_big_angles_sum + 1\n\t\t\t\t\tprint(\"didn't process image, because of too big original rotation:\")\n\t\t\t\t\tprint(path)\n\t\t\telse:\n\t\t\t\tno_desc_found_sum = no_desc_found_sum + 1\n\t\t\t\tprint(\"didn't process image, beacuse descriptor documents not found:\")\n\t\t\t\tprint(path)\n\n\tout_paths = []\n\tfor folder, subs, files in os.walk(data_folder):\n\t\tfor filename in files:\n\t\t\tif filename.endswith('.png') or filename.endswith('.jpg'):\n\t\t\t\tif \"processed\" in filename:\n\t\t\t\t\tout_path = os.path.join(folder, filename).replace(data_folder, \"\")\n\t\t\t\t\tout_paths.append(out_path)\n\n\t#writes paths of generated images into contents\n\tfilename = data_folder+'/contents'\n\n\twith open(filename, 'w') as f:\n\t\tf.write('\\n'.join(out_paths))\n\n\tprint \"Shuffling contents...\"\n\t#shuffles contents\n\tshuffle_contents(filename)\n\n\n\t#prints some statistics about the process on the screen\n\tprint\n\tprint(\"Statistics:\")\n\tprint(\"-----------\")\n\tprint(\"Files found: \"+str(all_files_sum))\n\tif all_files_sum != 0:\n\t\tprint(\"Already processed: \"+str(already_processed_sum))\n\t\tprint(\"Got processed now: \"+str(processed_sum))\n\t\tprint(\"All processed: \"+str((processed_sum+already_processed_sum)*100/all_files_sum)+\"%\")\n\t\tprint(\"Can't be processed because of too big angles: \"+str(too_big_angles_sum*100/all_files_sum)+\"%\")\n\t\tprint(\"Can't be processed because of no decriptors: \"+str(no_desc_found_sum*100/all_files_sum)+\"%\")\n\t\tprint(\"Can't be processed because of markers outside of mesh: \"+str(markers_out_of_mesh*100/all_files_sum)+\"%\")", "def calculation_time_analysis():\n\tfrom . import spectra as sp\n\tp_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tchiL,chiR,chiZ = sp.calc_chi([-3500],p_dict)\n\t\n\tfor angle in [0, np.pi/32, np.pi/16, np.pi/8, np.pi/4, np.pi/2]:\n\t\tprint(('Angle (degrees): ',angle*180/np.pi))\n\t\tRotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,angle)", "def add_spectogram_images(df, folder, countries):\n for country in countries:\n print(country)\n # Makes a dataframe subset and adds a column for the spectograms\n df_country = df.loc[df['NUTS_0'] == country]\n df_country[\"spectogram\"] = np.nan\n df_country[\"spectogram\"] = df_country[\"spectogram\"].astype(object)\n data_clean(country, folder)\n\n with open(folder + r\"\\spectra_ \" + country + \" .csv\") as fi:\n re = csv.reader(fi)\n np.array(next(re)[5:])\n\n for c, ro in enumerate(re):\n pointid = ro[2]\n if int(pointid) in df_country['Point_ID'].values:\n\n # make spectogram\n reflectance = np.array(ro[5:])\n r = savgol_filter(reflectance, 11, 2)\n f, t, Sxx = signal.spectrogram(r, 1)\n sxx_sdv = np.array(\n [[(Sxx[i][j] - np.mean(Sxx[i])) / np.std(Sxx[i]) for j in range(Sxx.shape[1])] for i in\n range(Sxx.shape[0])])\n fig, ax = plt.subplots()\n ax.axis('off')\n ax.pcolormesh(t, f, sxx_sdv, shading='gouraud') # , shading='gouraud'\n\n # save spectogram in RAM for faster saving\n io_buf = io.BytesIO()\n fig.savefig(io_buf, format='rgba', dpi=72)\n io_buf.seek(0)\n img_arr = np.reshape(np.frombuffer(io_buf.getvalue(), dtype=np.uint8),\n newshape=(int(fig.bbox.bounds[3]), int(fig.bbox.bounds[2]), -1))\n io_buf.close()\n plt.close()\n\n # cut white edges off image\n array = img_arr[35:-36]\n array = array[:, 54:-43]\n array = np.delete(array[:], -1, -1)\n # add spectogram do dataframe\n 
df_country.loc[df_country['Point_ID'] == int(pointid), ['spectogram']] = [array]\n\n # save dataframe in hdf5 file\n df_country = df_country.dropna()\n df_country.to_hdf(folder + r\"\\labeled_data.hdf5\", country)", "def _calc_avg_img(self, data: Union[Sequence[np.ndarray],\n Sequence[Sequence[np.ndarray]]]\n ) -> np.ndarray:\n summed = None\n cnt = 0\n for seq in data:\n if isinstance(seq, np.ndarray) and seq.ndim == 2:\n # seq is a single image, turn it into a sequence\n seq = [seq]\n\n for img in seq:\n # Sequence of image sequences\n norm_img = self._normalize_image(img)\n if summed is None:\n summed = norm_img\n else:\n summed += norm_img\n cnt += 1\n\n ret = summed / cnt\n return ret", "def sphere_l_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, i])\n\n return np.mean(pixels)", "def calculate_average_image(self,imlist):\n\t\t\n\t\tN=len(imlist)\n\t\t\n\t\tif self.mode == 'RGB':\n\t\t\tw,h,c=imlist[0].shape\n\t\t\tarr=np.zeros((h,w,3),theano.config.floatX)\n\t\telse:\n\t\t\tw,h=imlist[0].shape\t\t\n\t\t\tarr=np.zeros((h,w),theano.config.floatX)\n\n\t\tfor im in imlist:\n\t\t\timarr=np.array(im,dtype=theano.config.floatX)\n\t\t\ttry:\n\t\t\t\tarr=arr+imarr/N\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\t\t\n\t\tarr=np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr=np.array(np.round(arr),dtype=theano.config.floatX)\n\t\t#average_image=Image.fromarray(arr,mode=\"RGB\")\n\t\taverage_image=Image.fromarray(arr,mode=self.mode)\n\n\t\treturn average_image", "def main():\r\n\r\n ### Choose and Import File\r\n\r\n inSound = Sound()\r\n\r\n rate = inSound.rate\r\n data = inSound.data\r\n dataLength = len(data)\r\n \r\n info = inSound.get_info()\r\n head, filename = os.path.split(info[0]) # get filename of input\r\n \r\n # Decide output directory and filename\r\n outDir = r'out'\r\n outFile = os.path.join(outDir, 'out_'+filename)\r\n\r\n # Check if data has multiple channels, if yes use only one\r\n if(len(data.shape) > 1):\r\n data = data[:,0]\r\n\r\n\r\n ### Set All Parameters\r\n\r\n #get parameters from user dialogue\r\n params = getParameters()\r\n\r\n numChannels = params['numChannels'][0] # number of Channels\r\n loFreq = params['loFreq'][0] # lower bound on frequencies\r\n hiFreq = params['hiFreq'][0] # upper bound on frequencies\r\n plotChannels = params['plotChannels'][0] # if it should plot the Gammatone channels\r\n block_time = params['block_time'][0] # in ms\r\n block_shift = params['block_shift'][0] # in ms\r\n selectChannels = params['selectChannels'][0] # number of channels to activate at a single time\r\n\r\n\r\n ### Filter input file\r\n\r\n filtered, channel_fs = filterDataGamaTone(data, rate, numChannels, loFreq, hiFreq, plotChannels)\r\n\r\n\r\n ### Gammatones -> Stimulation Amplitude for time block\r\n\r\n samples_in_block = np.floor(block_time * rate / 1000).astype('int')\r\n samples_in_shift = np.floor(block_shift * rate / 1000).astype('int')\r\n\r\n summed = gammatoneToAmplitude(filtered, samples_in_block, samples_in_shift)\r\n\r\n # only activate the n electrodes that have the largest stimulation\r\n amps = n_largest_channels(summed, n=selectChannels)\r\n\r\n \r\n #### Sound reconstruction\r\n\r\n # for each timeblock we need to duplicate enough samples to fill it at sample rate\r\n amps_samples = np.repeat(amps, samples_in_shift, axis=1)\r\n #trim end to get same length as input\r\n amps_samples = amps_samples[:,:dataLength] \r\n\r\n # from amplitude samples and frequencies, reconstruct sound\r\n 
res_data = generateSound(amps_samples, channel_fs, rate)\r\n\r\n\r\n ### Write to output file\r\n write(outFile, rate, res_data)\r\n print('Wrote file to: \\n' + outFile)", "def astrometry_script(filename, catalog=\"PS\", rotation_scaling=True, xy_transformation=True, fine_transformation=True, images=False, vignette=3,vignette_rectangular=1., cutouts=None, ra=None, dec=None, projection_ra=None, projection_dec=None, verbose=False, save_images=False, ignore_header_rot=False, radius=-1., save_bad_result=False, silent=False, sigma_threshold_for_source_detection=5, high_res = False, hdul_idx=0, filename_for_sources=None, FWHM=4):\n #print(\"Program version: 1.2\")\n\n report = {}\n if(images):\n plt.ioff()\n warnings.simplefilter('ignore', UserWarning)\n fits_image_filename = filename\n\n print(\"> Astrometry for {} \".format(fits_image_filename))\n\n with fits.open(fits_image_filename) as hdul:\n #print(hdul.info())\n #print(hdul[0].header)\n\n hdu = hdul[hdul_idx]\n #hdu.verify('fix')\n hdr = hdu.header\n\n\n image_or = hdul[hdul_idx].data.astype(float)\n median = np.nanmedian(image_or)\n image_or[np.isnan(image_or)]=median\n image = image_or - median\n\n observation = find_sources(image, vignette,vignette_rectangular,cutouts, sigma_threshold_for_source_detection, FWHM=FWHM)\n #print(observation)\n\n #changed order of positions to [(x,y), (x,y),...] for compatibility with photutils 1.4\n xcenters = np.array(observation['xcenter'])\n ycenters = np.array(observation['ycenter'])\n positions = [(xcenters[i], ycenters[i]) for i in range(len(xcenters))]\n apertures = CircularAperture(positions, r=4.)\n\n\n #world coordinates\n if(not silent):\n print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n print(WCS(hdr))\n\n hdr[\"NAXIS1\"] = image.shape[0]\n hdr[\"NAXIS2\"] = image.shape[1]\n\n #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n wcsprm = WCS(hdr).wcs\n wcsprm_original = WCS(hdr).wcs\n wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, ra, dec,projection_ra, projection_dec, ignore_header_rot, radius)\n if(verbose):\n print(WCS(wcsprm.to_header()))\n coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n if(not PIXSCALE_UNCLEAR):\n if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n if(not silent):\n print(\"central value outside of the image, moving it to the center\")\n coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n #print(wcsprm)\n\n\n\n #better: put in nice wrapper! 
with repeated tries and maybe try synchron!\n if(not silent):\n print(\">Dowloading catalog data\")\n radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n catalog_data = query.get_data(coord, radius, catalog)\n report[\"catalog\"] = catalog\n #reference = reference.query(\"mag <20\")\n \n\n if(catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n if(not silent):\n print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n catalog_data2 = query.get_data(coord, radius, \"PS\")\n report[\"catalog\"] = \"PS\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n elif(catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n if(not silent):\n print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n report[\"catalog\"] = \"GAIA\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n\n max_sources = 400\n if(INCREASE_FOV_FLAG):\n max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n if(catalog_data.shape[0]>max_sources):\n catalog_data = catalog_data.nsmallest(400, \"mag\")\n #remove duplicates in catalog?\n\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Input for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n\n plt.xlim(-200,image.shape[0]+200)\n plt.ylim(-200,image.shape[1]+200)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_before.pdf\")\n\n ###tranforming to match the sources\n if(not silent):\n print(\"---------------------------------\")\n print(\">Finding the transformation\")\n if(rotation_scaling):\n if(not silent):\n print(\"Finding scaling and rotation\")\n wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=verbose)\n if(xy_transformation):\n if(not silent):\n print(\"Finding offset\")\n wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= verbose, silent=silent)\n\n #correct subpixel error\n compare_threshold = 3\n if(high_res):\n compare_threshold = 100\n obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=compare_threshold)#3\n if (len(distances) == 0): #meaning the list is empty\n best_score = 0\n else:\n rms = np.sqrt(np.mean(np.square(distances)))\n best_score = len(obs_x)/(rms+10) #start with 
current best score\n fine_transformation_success = False\n if(fine_transformation):\n print(\"Finding scaling and rotation\")\n lis = [2,3,5,8,10,6,4, 20,2,1,0.5]\n if(high_res):\n lis = [200,300,100,150,80,40,70, 20, 100, 30,9,5]\n skip_rot_scale = True\n for i in lis:\n wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i, compare_threshold=compare_threshold, skip_rot_scale=skip_rot_scale)\n if(i == 20):\n #only allow rot and scaling for the last few tries\n skip_rot_scale = False\n if(score> best_score):\n wcsprm = wcsprm_new\n best_score = score\n fine_transformation_success = True\n if not fine_transformation_success:\n if(not silent):\n print(\"Fine transformation did not improve result so will be discarded.\")\n else:\n if(not silent):\n print(\"Fine transformation applied to improve result\")\n #register.calculate_rms(observation, catalog_data,wcs)\n\n #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n wcs =WCS(wcsprm.to_header())\n if(verbose):\n print(wcs)\n from astropy.wcs import utils\n scales = utils.proj_plane_pixel_scales(wcs)\n #print(scales)\n cdelt = wcsprm.get_cdelt()\n #print(cdelt)\n scale_ratio = scales/cdelt\n #print(scale_ratio)\n pc = np.array(wcsprm.get_pc())\n pc[0,0] = pc[0,0]/scale_ratio[0]\n pc[1,0] = pc[1,0]/scale_ratio[1]\n pc[0,1] = pc[0,1]/scale_ratio[0]\n pc[1,1] = pc[1,1]/scale_ratio[1]\n wcsprm.pc = pc\n wcsprm.cdelt = scales\n\n #WCS difference before and after\n if(not silent):\n print(\"> Compared to the input the Wcs was changed by: \")\n scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n if(not silent):\n print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n #sources:\n #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n #https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n def unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. 
\"\"\"\n return vector / max(np.linalg.norm(vector), 1e-10)\n def matrix_angle( B, A ):\n \"\"\" comment cos between vectors or matrices \"\"\"\n Aflat = A.reshape(-1)\n Aflat = unit_vector(Aflat)\n Bflat = B.reshape(-1)\n Bflat = unit_vector(Bflat)\n #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n #bugfix: multiplying by cdelt otherwise the calculated angle is off by a tiny bit\n rotation_angle = matrix_angle(wcsprm.get_pc()@wcsprm.get_cdelt(), wcsprm_original.get_pc()@wcsprm_original.get_cdelt()) /2./np.pi*360.\n if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n text = \"counterclockwise\"\n else:\n text = \"clockwise\"\n if(not silent):\n print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n if(not silent):\n print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n\n\n #check final figure\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Result for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_after.pdf\")\n if(not silent):\n print(\"--- Evaluate how good the transformation is ----\")\n dic_rms = register.calculate_rms(observation, catalog_data,wcsprm)\n #updating file\n converged = determine_if_fit_converged(dic_rms, catalog_data, observation, wcsprm, image.shape[0], image.shape[1], silent)\n report[\"converged\"] = converged\n report[\"matches\"] = dic_rms[\"matches\"]\n report[\"match_radius\"] = dic_rms[\"radius_px\"]\n if(converged or save_bad_result):\n write_wcs_to_hdr(fits_image_filename, wcsprm, report, hdul_idx=hdul_idx)\n if(filename_for_sources != None):\n wcs =WCS(wcsprm.to_header())\n observation_on_sky = wcs.wcs_pix2world(observation[[\"xcenter\",\"ycenter\"]], 1)\n #catalog_from_obs = np.zeros(observation_on_sky.shape[0], dtype={'names':('ra', 'dec', 'aperture_sum'),'formats':('f8', 'f8', 'f8')})\n catalog_from_obs = pd.DataFrame()\n catalog_from_obs[\"ra\"]= observation_on_sky[:,0]\n catalog_from_obs[\"dec\"]= observation_on_sky[:,1]\n catalog_from_obs[\"aperture_sum\"]= observation[\"aperture_sum\"]\n catalog_from_obs[\"mag\"]= -1.* observation[\"aperture_sum\"]#this is fine since we only use the mag to order the sources!\n catalog_from_obs.to_csv(filename_for_sources+\".csv\")\n if(images):\n plt.show()\n\n return converged, dic_rms #dictionary with short info about fit, \"matches\" gives a number of objects matched within certain radius", "def get_snapshot_list(self, base, snappref=\"SPECTRA_\"):\n #print('Looking for spectra in', base)\n powerspectra = FluxPower(maxk=self.max_k)\n for snap in range(30):\n snapdir = 
os.path.join(base,snappref+str(snap).rjust(3,'0'))\n #We ran out of snapshots\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base,\"PART_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base, \"snap_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n continue\n #We have all we need\n if powerspectra.len() == np.size(self.zout):\n break\n try:\n ss = self._get_spectra_snap(snap, base)\n# print('Found spectra in', ss)\n if ss is not None:\n powerspectra.add_snapshot(snap,ss)\n except IOError:\n print(\"Didn't find any spectra because of IOError\")\n continue\n #Make sure we have enough outputs\n if powerspectra.len() != np.size(self.zout):\n raise ValueError(\"Found only\",powerspectra.len(),\"of\",np.size(self.zout),\"from snaps:\",powerspectra.snaps)\n return powerspectra", "def flatNoisePellicle():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def img_series_stats(image_ccd_lst,plots_path,obsdate):\n median_count = []\n mean_count = []\n \n source_hdu = CCDData(image_ccd_lst[0],unit='adu')\n source_image_data = source_hdu.data.astype(float) \n source_image_hdr = source_hdu.header\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n for a_file in image_ccd_lst:\n hdu = CCDData(a_file,unit='adu')\n image_data = hdu.data.astype(float) \n image_hdr = hdu.header\n \n median_count.append(np.median(a_file))\n mean_count.append(np.mean(a_file))\n \n min_count_for_median = np.min(median_count)\n min_count_for_mean = np.min(mean_count)\n max_count_for_median = np.max(median_count)\n max_count_for_mean = np.max(mean_count)\n \n plt.figure()\n plt.plot(mean_count, label='mean',color=\"palevioletred\")\n plt.axhline(y=min_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='min mean {:.2f}'.format(min_count_for_mean),alpha=1)\n plt.axhline(y=max_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='max mean {:.2f}'.format(max_count_for_mean),alpha=1)\n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Mean pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_mean.jpg\".format(obsdate,\n 
target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()\n\n plt.figure()\n plt.plot(median_count, label='median',color=\"darkviolet\")\n plt.axhline(y=min_count_for_median,linestyle='-',linewidth=0.5,color='red',label='min median {:.2f}'.format(min_count_for_median),alpha=1)\n plt.axhline(y=max_count_for_median,linestyle='-',linewidth=0.5,color='red',label='max median {:.2f}'.format(max_count_for_median),alpha=1) \n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Median pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_median.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()", "def calculate_mean_dark(data_dir):\n\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n \n all_data = []\n for num_data in data:\n #print(num_data)\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data')\n all_data.append(data)\n #print\n\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data", "def specPlot(image, qplot=True):\n # Load image, get dimensions\n im = readImage(image)\n L, W = im.shape\n xmin, xmax = (-W//2, W//2)\n ymin, ymax = (-L//2, L//2)\n xrng = range(xmin, xmax)\n yrng = range(ymin, ymax)\n\n # Calculate power spectrum\n spec = np.abs(fftshift(fft2(im)))**2\n\n # Calculate rotational average of spectrum\n [fx, fy] = np.meshgrid(xrng, yrng)\n sf = (np.sqrt(fx**2 + fy**2)).round().astype(int).flatten()\n rot_spec = np.bincount(sf, weights=spec.flatten()) / np.bincount(sf)\n rot_spec = rot_spec[1:min(L,W)//2]\n\n # Make a plot if requested\n if qplot:\n fig1, ax1 = plt.subplots()\n h = ax1.imshow(np.log10(spec), extent=[xmin, xmax, ymin, ymax])\n ax1.axis('off')\n cb = fig1.colorbar(h)\n cb.set_label(r'$\\log_{10}$(Energy)')\n\n fig2, ax2 = plt.subplots()\n ax2.loglog(np.arange(1, len(rot_spec)+1), rot_spec)\n ax2.set_xlabel('Spatial frequency (cycles/image)')\n ax2.set_ylabel('Energy')\n\n # Return\n if qplot:\n return spec, rot_spec, (fig1, ax1), (fig2, ax2)\n else:\n return spec, rot_spec", "def flattenFrames(stack):\n \n maxHeight=0\n frameList=[]\n \n \n print('\\n')\n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting shifts {:.2f}% done'.format(100.0*((i+1)/len(stack))),end='', flush=True)\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack" ]
[ "0.61825204", "0.5834751", "0.5748448", "0.5740321", "0.5725463", "0.57223636", "0.56904304", "0.5681795", "0.5607029", "0.5605107", "0.55935085", "0.55795133", "0.5555331", "0.55550176", "0.5546603", "0.55278903", "0.5524576", "0.546944", "0.54588705", "0.54191047", "0.54079944", "0.54061735", "0.5395522", "0.5362478", "0.5326585", "0.5314448", "0.5307331", "0.52968687", "0.528289", "0.5266818", "0.5266321", "0.5266288", "0.52645767", "0.5247218", "0.52460593", "0.5245156", "0.5232705", "0.52190804", "0.5211079", "0.5210932", "0.52066475", "0.52053756", "0.5205325", "0.5203669", "0.51900536", "0.5188819", "0.5185826", "0.5175143", "0.51734203", "0.51707554", "0.5161773", "0.51554006", "0.51508945", "0.5148637", "0.5146129", "0.51447177", "0.51194674", "0.5118541", "0.5113186", "0.51129794", "0.51102096", "0.51089066", "0.51072043", "0.51059854", "0.5103082", "0.5093077", "0.50929123", "0.50873184", "0.50784457", "0.5076922", "0.50754166", "0.5074704", "0.5072368", "0.5065851", "0.5061594", "0.5051425", "0.50487196", "0.5036799", "0.50360924", "0.5035003", "0.5034257", "0.5020139", "0.5019195", "0.5013665", "0.50101805", "0.5008413", "0.50052226", "0.5003192", "0.5002154", "0.50009525", "0.4997759", "0.49952835", "0.49925548", "0.49911967", "0.49902043", "0.49892414", "0.4988383", "0.4984094", "0.49786463", "0.49763867", "0.497391" ]
0.0
-1
Gather a list of EMData on all nodes to the main node; we assume the list has the same length on each node.
def gather_EMData(data, number_of_proc, myid, main_node):\n\tfrom mpi import MPI_COMM_WORLD, MPI_INT, MPI_TAG_UB\n\tfrom mpi import mpi_send, mpi_recv\n\n\tl = len(data)\n\tgathered_data = []\n\tinc = 1   # A temp measure\n\tif myid == main_node:\n\t\tfor i in xrange(0, number_of_proc*inc, inc):\n\t\t\tif i == main_node:\n\t\t\t\tfor k in xrange(l):\n\t\t\t\t\tgathered_data.append(data[k])\n\t\t\telse:\n\t\t\t\tfor k in xrange(l):\n\t\t\t\t\tim = recv_EMData(i, i*l+k)\n\t\t\t\t\tmem_len = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)\n\t\t\t\t\tmembers = mpi_recv(int(mem_len[0]), MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)\n\t\t\t\t\tmembers = map(int, members)\n\t\t\t\t\tim.set_attr('members', members)\n\t\t\t\t\tgathered_data.append(im)\n\telse:\n\t\tfor k in xrange(l):\n\t\t\tsend_EMData(data[k], main_node, myid*l+k)\n\t\t\tmem = data[k].get_attr('members')\n\t\t\tmpi_send(len(mem), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)\n\t\t\tmpi_send(mem, len(mem), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)\n\treturn gathered_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather(self, node):\n\n return []", "def _data_parallel_master(self, intermediates):\n\n # Always using same \"device order\" makes the ReduceAdd operation faster.\n # Thanks to:: Tete Xiao (http://tetexiao.com/)\n intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())\n\n to_reduce = [i[1][:2] for i in intermediates]\n to_reduce = [j for i in to_reduce for j in i] # flatten\n target_gpus = [i[1].sum.get_device() for i in intermediates]\n\n sum_size = sum([i[1].sum_size for i in intermediates])\n sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)\n mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)\n\n broadcasted = Broadcast.apply(target_gpus, mean, inv_std)\n\n outputs = []\n for i, rec in enumerate(intermediates):\n outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))\n\n return outputs", "def all_gather(data):\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n if type(data) is torch.Tensor:\n data = data.cpu()\n # serialized to a Tensor\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n # obtain Tensor size of each rank\n local_size = torch.LongTensor([tensor.numel()]).to(\"cuda\")\n size_list = [torch.LongTensor([0]).to(\"cuda\") for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = [int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(torch.ByteTensor(size=(max_size,)).to(\"cuda\"))\n if local_size != max_size:\n padding = torch.ByteTensor(size=(max_size - local_size,)).to(\"cuda\")\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data = pickle.loads(buffer)\n if type(data) is torch.Tensor:\n data = data.to(\"cuda\")\n data_list.append(data)\n\n return data_list", "def all_gather(data):\n world_size = dist.get_world_size()\n if world_size == 1:\n return [data]\n\n buffer = pickle.dumps(data) #write data into Bytes and stores in buffer\n np_buffer = np.frombuffer(buffer, dtype=np.int8)\n tensor = paddle.to_tensor(np_buffer, dtype='int32') # uint8 doese not have many ops in paddle\n\n # obtain Tensor size of each rank\n local_size = paddle.to_tensor([tensor.shape[0]])\n size_list = []\n dist.all_gather(size_list, local_size)\n max_size = max(size_list)\n\n # receiving tensors from all ranks, \n # all_gather does not support different shape, so we use padding\n tensor_list = []\n if local_size != max_size:\n padding = paddle.empty(shape=(max_size - local_size, ), dtype='int32')\n tensor = paddle.concat((tensor, padding), axis=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.astype('uint8').cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list", "def prepare_data_for_d(self):\n\n center_nodes = []\n neighbor_nodes = []\n labels = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n # self.graph[i] = [neighbors of i]\n pos = self.graph[i]\n neg, _ = self.sample(i, self.trees[i], len(pos), for_d=True)\n # print(\"tree_i_d: \", self.trees[i])\n # print(\"neg_samples: \", 
neg)\n # print(\"neg is: \", neg)\n if len(pos) != 0 and neg is not None:\n # positive samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(pos)\n labels.extend([1] * len(pos))\n\n # negative samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(neg)\n labels.extend([0] * len(neg))\n # print(\"cen: \", center_nodes)\n return center_nodes, neighbor_nodes, labels", "def data_nodes(self):\n data_nodes = []\n for node in self.nodes:\n if 'datanode' == node.get('infos').get('type'):\n data_nodes.append(node)\n return data_nodes", "def collect(self):\n while self.proc is not None:\n self.read()\n if not len(self.datalines):\n return\n while len(self.datalines):\n # pop the first node of list\n yield self.datalines.pop(0)", "def ndata(self):\n raise Exception(\"Graph store doesn't support access data of all nodes.\")", "def compute_nodeset(data):\n xset = NodeSet()\n for nodeset in data.split():\n xset.update(nodeset)\n return xset", "def compute_embeddings(model, opts, data):\n node_embeddings = []\n node_scores = []\n # batch size is 1 for computing embeddings\n dataloader = DataLoader(dataset=data, batch_size=1, shuffle=True, num_workers=16)\n model.eval()\n print(\"computing embeddings...\")\n with torch.no_grad():\n for batch in tqdm(dataloader):\n batch.to(opts.device)\n batch_scores, batch_embeddings = model(batch, compute_embeddings=False)\n node_embeddings.append(batch_embeddings)\n node_scores.append(batch_scores)\n # input('enter for embeddings')\n # print(node_embeddings)\n\n return torch.stack(node_scores), torch.stack(node_embeddings)", "def all_gather_list(data, group=None, max_size=16384):\n SIZE_STORAGE_BYTES = 4 # int32 to encode the payload size\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n\n if enc_size + SIZE_STORAGE_BYTES > max_size:\n raise ValueError(\n 'encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))\n\n rank = get_rank()\n world_size = get_world_size()\n buffer_size = max_size * world_size\n\n if not hasattr(all_gather_list, '_buffer') or \\\n all_gather_list._buffer.numel() < buffer_size:\n all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)\n all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()\n\n buffer = all_gather_list._buffer\n buffer.zero_()\n cpu_buffer = all_gather_list._cpu_buffer\n\n assert enc_size < 256 ** SIZE_STORAGE_BYTES, 'Encoded object size should be less than {} bytes'.format(\n 256 ** SIZE_STORAGE_BYTES)\n\n size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')\n\n cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))\n cpu_buffer[SIZE_STORAGE_BYTES: enc_size + SIZE_STORAGE_BYTES] = torch.ByteTensor(list(enc))\n\n start = rank * max_size\n size = enc_size + SIZE_STORAGE_BYTES\n buffer[start: start + size].copy_(cpu_buffer[:size])\n\n all_reduce(buffer, group=group)\n\n try:\n result = []\n for i in range(world_size):\n out_buffer = buffer[i * max_size: (i + 1) * max_size]\n size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')\n if size > 0:\n result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES: size + SIZE_STORAGE_BYTES].tolist())))\n return result\n except pickle.UnpicklingError:\n raise Exception(\n 'Unable to unpickle data from other workers. all_gather_list requires all '\n 'workers to enter the function together, so this error usually indicates '\n 'that the workers have fallen out of sync somehow. 
Workers can fall out of '\n 'sync if one of them runs out of memory, or if there are other conditions '\n 'in your training script that can cause one worker to finish an epoch '\n 'while other workers are still iterating over their portions of the data.'\n )", "def all_gather_create_tensor_list(tensor: torch.Tensor, ngpus_per_node: int) -> List[torch.Tensor]:\n # tensor_list -> Output list. It should contain correctly-sized tensors to be used \n # for output of the collective.\n tensor_list = [ torch.zeros_like(tensor) for _ in range(ngpus_per_node) ]\n # Gathers tensors from the whole group in a list. \n # The variable `tensor` will not be affected by this operation.\n dist.all_gather(tensor_list=tensor_list, tensor=tensor)\n return tensor_list", "def _get_node_list(self, machine_name, max_nodes):\n\n q = Queue()\n # if machine_name.lower() == 'summit':\n # add relative node names starting with 1 for creating ERF files\n for i in range(max_nodes):\n q.put('{}'.format(i+1))\n return q", "def eventlist():\n\n infile = conf[\"run_path_derived\"] + 'LOCALIZED.txt'\n\n data = np.genfromtxt(infile, skip_header=1) \n\n mlt = cx.MAGtoMLT(data[:, 5], data[:, 0:5])\n\n # Swap mlat and mlon colums so in expected order (lat then long)\n data[:, [6,5]] = data[:, [5,6]]\n \n data = np.hstack((data, np.reshape(mlt, (mlt.shape[0], 1))))\n \n return data", "def build_network(self):\n\n\n logits_list = []\n for dn in self.find_datanodes():\n\n if len(dn.receives_from) == 0: continue\n\n logits = 0\n for rf in dn.receives_from:\n logits += rf.get_tensors(rf.connect_backwards())[0]\n\n logits_list.append(logits)\n\n return logits_list", "def flatten_data(data):\r\n return list(gen_flatten_data(data))", "def _build_from_chunks(self, data_node):\n result = ''\n\n if not data_node:\n return ''\n\n master_data = data_node[0]\n result = \"{}{}\".format(result, self._decode(master_data['value']))\n # if data is not in chunks, then return the first node's value\n if 'tags' not in master_data or 'chunks' not in master_data['tags']:\n return result\n\n # join the values in chunks\n last_chunk = int(master_data['tags']['chunks'])\n for chunk_id in range(1, last_chunk):\n slave_data = data_node[chunk_id]\n result = \"{}{}\".format(result, self._decode(slave_data['value']))\n return result", "def _finalize_data(self):\n\n if isinstance(self.node_data, np.ndarray): # SR workflow\n self.node_data = da.from_array(self.node_data)\n elif isinstance(self.node_data, list): # vr workflow\n struct_data = np.empty(len(self.node_data), dtype=self.data.dtype)\n datavals = np.array(self.node_data)\n for cnt, varname in enumerate(self.data.dtype.names):\n struct_data[varname] = datavals[:, cnt]\n self.node_data = da.from_array(struct_data)\n if isinstance(self.data, np.ndarray):\n self.data = da.from_array(self.data)", "def create_start_data(self):\n\t\tdef inputMesh(feature_size):\n\t\t\tc1= np.expand_dims(np.array([0,-0.9]),0)\n\t\t\tc2= np.expand_dims(np.array([-0.9,0.9]),0)\n\t\t\tc3= np.expand_dims(np.array([0.9,0.9]),0)\n\t\t\tx1 = np.expand_dims(np.pad(np.array([0,-0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx2 = np.expand_dims(np.pad(np.array([-0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx3 = np.expand_dims(np.pad(np.array([0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tedge_index = np.transpose(np.array([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])) # COO format\n\t\t\treturn np.concatenate((c1,c2,c3),axis=0), 
np.concatenate((x1,x2,x3),axis=0),edge_index\n\n\t\tc, x, edge_index = inputMesh(self.params.feature_size)# x is c with zeros appended, x=f ..pixel2mesh\n\t\tdata_list_x = []\n\t\tdata_list_c = []\n\t\tdata_list_pid = []\n\t\tfor i in range(self.params.batch_size):\n\t\t\tdata_list_x.append(Data(x=torch.Tensor(x).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_c.append(Data(x=torch.Tensor(c).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_pid.append(Data(x=torch.zeros(c.shape[0],1).type(dtypeL).requires_grad_(False)))\n\t\tbatch_x = Batch.from_data_list(data_list_x)\n\t\tbatch_c = Batch.from_data_list(data_list_c)\n\t\tbatch_pid = Batch.from_data_list(data_list_pid)\n\t\treturn batch_x, batch_c, batch_pid", "def data_collection():\n global PAUSED\n print(\"Detecting nodes\")\n while True:\n data = SOCK.recvfrom(1024)[0] # buffer size is 1024 bytes\n message = data.decode()\n try:\n message_function = message[0]\n message = message[1:]\n \n if message_function == \"t\":\n loc, temp, hum = message.split(\", \")\n temp = (float(temp) * 1.8) + 32 # convert from C to F\n\n # Checks if location is alreay in the rolling_X dictionarys. If not, it creates an entry\n # in the dictionary and populates it with the defaults\n if loc not in ROLLING_TEMPS:\n ROLLING_TEMPS[loc] = copy(TEMPDEQUEDEFAULT)\n print(loc, \"has connected\")\n if loc not in ROLLING_HUMS:\n ROLLING_HUMS[loc] = copy(HUMDEQUEDEFAULT)\n\n # Append new temp and humidity to appropriate deque in dictionaries\n ROLLING_TEMPS[loc].appendleft(temp)\n ROLLING_HUMS[loc].appendleft(hum)\n LAST_RECEIVED[loc] = datetime.datetime.utcnow()\n \n elif message_function == \"c\":\n if message == \"pause\":\n PAUSED = True\n print(\"pausing\")\n elif message == \"unpause\":\n PAUSED = False\n print(\"unpausing\")\n else:\n print(\"unknown command function\")\n elif message_function == \"i\":\n if message == \"status\":\n print(\"Paused:\", PAUSED)\n else:\n print(\"unknown info function\")\n except:\n print(\"malformed data\")", "def flatten_data(data):\r\n result = []\r\n for mesurements in data:\r\n result.append(mesurements.flatten())\r\n return np.array(result)", "def items(self):\n items = []\n current = self.head\n while current != None:\n items.append(current.data)\n current = current.next\n return items", "def graph_data(\n edge_list_path,\n node_features_path,\n protein_ids_path,\n protein_id_col_node=\"Gene\",\n protein_id_col_prot=\"ensembl.gene\",\n sparse_tensor=True,\n cut=0,\n):\n a = pd.read_csv(edge_list_path).values\n edge_attr = a[:, 2:] / 1000.0\n\n # cut the edges\n cut_mask = edge_attr[:, -1] > cut\n edge_ind = torch.tensor(a[:, :2][cut_mask], dtype=torch.long)\n edge_attr = torch.tensor(edge_attr[cut_mask], dtype=torch.float32)\n\n # force undirected\n if not is_undirected(edge_ind):\n edge_ind = torch.cat([edge_ind, edge_ind[:, [1, 0]]], 0)\n edge_attr = torch.cat([edge_attr, edge_attr], 0)\n\n # features\n protein_ids = pd.read_csv(protein_ids_path, sep=\"\\t\")[\n [\"id\", protein_id_col_prot]\n ]\n x = pd.read_csv(node_features_path, sep=\"\\t\")\n feature_columns = x.drop(protein_id_col_node, 1).columns\n x = pd.merge(\n protein_ids,\n x,\n how=\"left\",\n left_on=protein_id_col_prot,\n right_on=protein_id_col_node,\n ).sort_values(\"id\")[feature_columns]\n x.fillna(x.mean(), inplace=True)\n x = torch.tensor(((x - x.mean()) / x.std()).values, dtype=torch.float32)\n data = Data(x, edge_ind.T, edge_attr, id=torch.arange(x.shape[0]))\n\n if sparse_tensor:\n 
tsp = ToSparseTensor(False)\n data = tsp(data)\n\n return data", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def query(self, data):\n to_return = []\n for element in data.data:\n element = torch.unsqueeze(element, 0)\n if len(self.data) < self.max_size:\n self.data.append(element)\n to_return.append(element)\n else:\n if random.uniform(0,1) > 0.5:\n i = random.randint(0, self.max_size-1)\n to_return.append(self.data[i].clone())\n self.data[i] = element\n else:\n to_return.append(element)\n return torch.cat(to_return)", "def _collect_data(self, current_generation):\n neighbours = self._Individuals()\n while True:\n\n if self._check_collected_data(neighbours):\n break\n if not self._data_consuming_queue.is_ready(current_generation):\n continue\n data = self._data_consuming_queue.consume_message(current_generation)\n if current_generation != data.generation:\n continue\n\n self._parse_received_data(neighbours, int(data.source), data.data)\n\n logger.info(\"RECEIVED data\" + str(data.data) + str(data.source))\n\n return neighbours", "def display_content(self):\n list = []\n traverse = self.head\n\n if self.head == None:\n return\n\n while traverse.next != None:\n list.append(traverse.data)\n traverse = traverse.next\n\n list.append(traverse.data)\n return list", "def offset_list(self):\n self.nodes.append(None)\n self.formulas.append(None)\n self.node_memory.append(None)\n self.formulas_memory.append(None)", "def merge_data(self, nodenet_data, keep_uids=False):\n\n uidmap = {}\n # for dict_engine compatibility\n uidmap[\"Root\"] = \"s1\"\n\n # re-use the root nodespace\n uidmap[\"s1\"] = \"s1\"\n\n # merge in spaces, make sure that parent nodespaces exist before children are initialized\n nodespaces_to_merge = set(nodenet_data.get('nodespaces', {}).keys())\n for nodespace in nodespaces_to_merge:\n self.merge_nodespace_data(nodespace, nodenet_data['nodespaces'], uidmap, keep_uids)\n\n # merge in 
nodes\n for uid in nodenet_data.get('nodes', {}):\n data = nodenet_data['nodes'][uid]\n parent_uid = data['parent_nodespace']\n if not keep_uids:\n parent_uid = uidmap[data['parent_nodespace']]\n if data['type'] in self.__nodetypes or data['type'] in self.native_modules:\n olduid = None\n if keep_uids:\n olduid = uid\n new_uid = self.create_node(\n data['type'],\n parent_uid,\n data['position'],\n name=data['name'],\n uid=olduid,\n parameters=data['parameters'],\n gate_parameters=data['gate_parameters'],\n gate_functions=data['gate_functions'])\n uidmap[uid] = new_uid\n node_proxy = self.get_node(new_uid)\n for gatetype in data['gate_activations']: # todo: implement sheaves\n node_proxy.get_gate(gatetype).activation = data['gate_activations'][gatetype]['default']['activation']\n\n else:\n warnings.warn(\"Invalid nodetype %s for node %s\" % (data['type'], uid))\n\n # merge in links\n for linkid in nodenet_data.get('links', {}):\n data = nodenet_data['links'][linkid]\n self.create_link(\n uidmap[data['source_node_uid']],\n data['source_gate_name'],\n uidmap[data['target_node_uid']],\n data['target_slot_name'],\n data['weight']\n )\n\n for monitorid in nodenet_data.get('monitors', {}):\n data = nodenet_data['monitors'][monitorid]\n if 'node_uid' in data:\n old_node_uid = data['node_uid']\n if old_node_uid in uidmap:\n data['node_uid'] = uidmap[old_node_uid]\n if 'classname' in data:\n if hasattr(monitor, data['classname']):\n getattr(monitor, data['classname'])(self, **data)\n else:\n self.logger.warn('unknown classname for monitor: %s (uid:%s) ' % (data['classname'], monitorid))\n else:\n # Compatibility mode\n monitor.NodeMonitor(self, name=data['node_name'], **data)", "def get_data_block(self, index, next_index):\n next_index = tf.minimum(next_index, self.Nt)\n indices = tf.range(index, next_index)\n data = [flatten_batch_dims(tf.gather(d, indices, axis=0),num_batch_dims=-self.event_size) for d in self.data]\n return data", "def get_output_nodes(self):\n \n\n self.buildings = self.dataset.groups['buildings']\n self.building_nodes = self.buildings.groups['nodes']\n\n eta_output_added = getattr(self.building_nodes,'eta_output_added')\n uv_output_added = getattr(self.building_nodes,'uv_output_added')\n\n eta = []\n uv = []\n nodeIds = []\n time = []\n \n if(eta_output_added or uv_output_added ):\n time = self.building_nodes.variables['time'][:].tolist()\n nodeIds = self.building_nodes.variables['id'][:].tolist()\n if eta_output_added: eta = self.building_nodes.variables['eta'][:].tolist()\n if uv_output_added: uv = self.building_nodes.variables['uv'][:].tolist()\n\n \n return nodeIds,eta, uv, time", "def display_content(self):\n list = []\n traverse = self.head\n\n if self.head == None:\n # print(\"Linked List is empty\")\n return\n\n while traverse.next != None:\n list.append(traverse.data)\n traverse = traverse.next\n\n list.append(traverse.data)\n return list", "def load_data():\n\n server_node = load_nodes(SERVER_NODE_INFILE)\n road_node = load_nodes(ROAD_NODE_INFILE)\n road_segment_point = load_nodes(ROAD_SEGMENT_POINT_INFILE)\n\n return server_node, road_node, road_segment_point", "def handle_leaf_nodes(self, nodes):\n max_node_id = -1\n for n in nodes:\n n.sum_hess = self.decrypt(n.sum_hess)\n n.sum_grad = self.decrypt(n.sum_grad)\n n.weight = self.splitter.node_weight(n.sum_grad, n.sum_hess)\n n.sitename = self.sitename\n if n.id > max_node_id:\n max_node_id = n.id\n new_nodes = [Node() for i in range(max_node_id + 1)]\n for n in nodes:\n new_nodes[n.id] = n\n return new_nodes", "def 
_process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()", "def getNodes(self):\n nodes = [{\"address\": \"http://0.0.0.0:100\"}\n ,{\"address\": \"http://0.0.0.0:200\"}\n ,{\"address\": \"http://0.0.0.0:300\"}\n ,{\"address\": \"http://0.0.0.0:400\"}\n ,{\"address\": \"http://0.0.0.0:500\"}]\n return nodes", "def collect_outputs(self):\n # collect submex output hdf urls and add them to top mex outputs section\n top_mex = self.bqSession.fetchxml(self.options.mexURL, view='deep')\n outputTag = top_mex.xpath('/mex/tag[@name=\"outputs\"]')\n if not outputTag:\n # no \"outputs\" tag in mex => add it now\n etree.SubElement(top_mex, 'tag', name='outputs') \n top_mex = self.bqSession.postxml(url=top_mex.get('uri'), xml=top_mex, view='deep')\n outputTag = top_mex.xpath('/mex/tag[@name=\"outputs\"]')\n outputTag = outputTag[0]\n output_hdfs = top_mex.xpath('/mex/mex/tag[@name=\"outputs\"]/tag[@name=\"output_hdf\"]/@value')\n etree.SubElement(outputTag, 'tag', name='all_outputs', value=';'.join([ohdf.split('/')[-1] for ohdf in output_hdfs]))\n self.bqSession.postxml(url=outputTag.get('uri'), xml=outputTag)", "def get_all_data(self):\n\t\treply = self._send_command_to_entity_server(us.SERVER_COMMAND_REQUEST_ALL_DATA)\n\t\t#print(reply)\n\t\treturn reply", "def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes", "def getPoolData(self):\r\n # type: () -> (list[Data.Data])\r\n output = []\r\n # start from the beginning of the pool area\r\n ea 
= self.func_ea + self.getSize(withPool=False)\r\n while ea < self.getSize(withPool=True):\r\n # create and append the data item\r\n data = Data.Data(ea)\r\n output.append(data)\r\n # advance ea to the next item\r\n ea += data.getSize()\r\n return output", "def concatenate_data():", "def get_nodes(self):\n self.map_graph_id()\n self.nodes_list = [\n self.NX_GRAPHS[self.graph_id].nodes[idx]['label'] \n for idx in range(len(self.NX_GRAPHS[self.graph_id].nodes))]", "def get_node_list(self):\n return []", "def _read_data(self):\n return [np.array([]), np.array([])]", "def forward(self, data):\r\n x = F.celu(self.lin0(data.x)) # data.x [#,#node features] -lin0-> [#, hidden_dim]\r\n for conv in self.convs:\r\n # conv <- Block [#, hidden_dim]\r\n x = x + F.dropout(conv(x, data.edge_index, data.edge_attr), p=self.dropout, training=self.training)\r\n x = self.set2set(x, data.batch) # [batch_size, 2*hidden_dim]\r\n \"\"\" data.batch marks the atoms that belongs to each one of the 128 molecules of a batch\"\"\"\r\n x = self.out(F.dropout(x, p=self.dropout, training=self.training)) #[batch_size, 2]\r\n\r\n return x", "def _recv(self) -> List[np.ndarray]:", "def _build_node_data_matrix(self, mins: list, maxs: list):\n\n self.mins = mins\n self.maxs = maxs\n\n if self.is_vr:\n self.node_data = []\n else: # calculate MxN shape\n size = int((self.maxs[0] - self.mins[0]) / self.min_grid_size)\n # cast to float64 to allow us to use np.nan as nodatatype\n dtyp = [(varname, np.float) for varname in ['z', 'tvu'] if varname in self.data.dtype.names]\n self.node_data = np.full((size, size), np.nan, dtype=dtyp)", "def get_data():\n samples = []\n for fn in files:\n samples.extend(_json.load(open(fn, \"r\")))\n for sample in samples:\n graph = _nx.readwrite.json_graph.node_link_graph(sample)\n _edges = graph.edges(data=True)\n _nodes = dict(graph.nodes(data=True)).values()\n sources, targets, edges = zip(*[(src, tgt, edge) for src, tgt, edge in _edges])\n edge_features = _tf.constant(_np.array([\n [edge[k] for k in edge_feature_names if k in edge] for edge in edges\n ]))\n edge_sources = _tf.squeeze(_tf.constant(_np.array(sources)))\n edge_targets = _tf.squeeze(_tf.constant(_np.array(targets)))\n node_features = _tf.constant(_np.array([\n [node[k] for k in node_feature_names if k in node]\n for node in _nodes\n ]))\n additional_inputs = (\n _tf.constant(_np.array([\n [node[k] for k in additional_inputs_names if k in node]\n for node in _nodes\n ]))\n if local else\n _tf.constant(_np.array([\n graph.graph[additional_input] for additional_input in additional_inputs_names\n if additional_input in graph.graph\n ]))\n )\n data = GNNInput(\n edge_features=edge_features,\n edge_sources=edge_sources,\n edge_targets=edge_targets,\n node_features=node_features,\n additional_inputs=additional_inputs,\n )\n if local:\n y = _tf.squeeze(_tf.constant(_np.array([\n [node[k] for k in target if k in node] for node in _nodes\n ])))\n else:\n y = _tf.constant(_np.array([\n graph.graph[_target] for _target in target if _target in graph.graph\n ]))\n yield data, y", "def populate_graph(self):", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def getNodes(self):\n data = self.connect('get','nodes',None)\n return data", "def plotExternalNodes( self ):\n\n max_x = max(self.mNodeWidthsEnd)\n for node_id in self.mTree.get_terminals():\n\n node = self.mTree.node( node_id )\n\n x = self.mNodeWidthsEnd[node_id]\n y = 
self.mNodeHeights[node_id]\n \n if self.mLeftJustifiedExternalNodes:\n x_label = max_x\n else:\n x_label = x\n \n e = self.mDecoratorExternalNodes.getElements( node_id,\n self.getHeaderWidth() + x,\n self.getHeaderHeight() + y,\n self.getHeaderWidth() + x_label,\n self.getHeaderHeight() + y )\n \n self.addElements(e)", "def _beam_data(beams, network):\n inds, indi, indf = [], [], []\n EIx, EIy = [], []\n for key in beams:\n nodes = beams[key]['nodes']\n inds.extend(nodes[0:-2])\n indi.extend(nodes[1:-1])\n indf.extend(nodes[2:])\n EIx.extend([network.vertex[i]['EIx'] for i in nodes[1:-1]])\n EIy.extend([network.vertex[i]['EIy'] for i in nodes[1:-1]])\n EIx = array(EIx)[:, newaxis]\n EIy = array(EIy)[:, newaxis]\n return inds, indi, indf, EIx, EIy", "def generate_list(self):\n\n self.queue.append(self.root_node)\n\n while self.queue:\n node = self.queue.pop(0)\n\n self.rtn_list.append(node.data)\n\n if node.left:\n self.queue.append(node.left)\n\n if node.right:\n self.queue.append(node.right)\n\n return self.rtn_list", "def get_devices_per_node(self):\n\n for i in self._nodes.items():\n node = i[1]\n # Update the interface data\n\n self._get_device(node)\n\n self.updateconfig()", "def gatherData(self):\n dagPath, components = self.__getGeometryComponents()\n self.gatherInfluenceWeights(dagPath, components)\n self.gatherBlendWeights(dagPath, components)\n\n for attr in ['skinningMethod', 'normalizeWeights']:\n self.data[attr] = cmds.getAttr('%s.%s' % (self.node, attr))", "def iter_nodes(self):", "def reduce(self, app, nodes, result):", "def get_nodes(self):\n pass", "def plotInternalNodes( self ):\n\n \n for node_id in self.mTree.chain.keys():\n\n node = self.mTree.node( node_id )\n if node.succ == []: continue\n \n x = self.mNodeWidthsEnd[node_id]\n y = self.mNodeHeights[node_id]\n\n e = self.mDecoratorInternalNodes.getElements( node_id,\n self.getHeaderWidth() + x,\n self.getHeaderHeight() + y )\n \n self.addElements(e)", "def gen_ep_data(self,min_trial_len=2,max_trial_len=3,ntrials=2):\n # self.randomize_emat()\n tseq,xseq,yseq = self.gen_seqs_multitrial(min_trial_len,max_trial_len,ntrials)\n xseq_embed = self.embed_xseq(xseq)\n # np to torch\n tseq = tr.unsqueeze(tr.LongTensor(tseq),1)\n xseq_embed = tr.unsqueeze(tr.Tensor(xseq_embed),1)\n yseq = tr.unsqueeze(tr.LongTensor(yseq),1)\n return tseq,xseq_embed,yseq", "def get_master_nodes(self):\n default = 3\n master_nodes_count = input('enter number of master nodes\\n'\n 'default [3]: ')\n master_nodes_count = set_values(master_nodes_count, default, check='integer')\n master_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['master_nodes'] = []\n for num in range(master_nodes_count):\n master_values = []\n default = 'etcd-{}'.format(num)\n master_name = input('enter the master {} node name \\n'\n 'default [{}]: '.format(num, default))\n master_name = set_values(master_name, default)\n master_ip = get_ip(node_name=master_name, ip_type='os')\n master_mac = get_network_device_mac(node_name=master_name, ip_type='idrac')\n master_values.append(master_name)\n master_values.append(master_ip)\n master_values.append(master_mac)\n master_node_dict_pairs = dict(zip(master_keys, master_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(master_name, master_name,\n master_ip, master_mac)) \n self.inventory_dict['csah']['vars']['master_nodes'].append(master_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_masters'] = master_nodes_count", "def get_node_list(self):\n 
logger.debug('Updating node list')\n self.subscribe_mqtt('/nodes/+/responses/ping')\n self.node_ids = []\n\n def on_response(payload, data):\n if data and data.get('node', None):\n node_id = data['node']\n logger.debug('Found node with ID \"%s\"' % node_id)\n\n if node_id not in self.node_ids:\n self.node_ids.append(node_id)\n\n return False\n\n self.publish_mqtt('/ping', on_response=on_response)\n time.sleep(self.timeout / 1000)\n\n return self.node_ids", "def collect(self):\r\n for idx_ds, ds in enumerate(self.datasets):\r\n # print \"collect() using ds = \", repr(ds)\r\n reader_rows = ds.dict_reader()\r\n for row in reader_rows:\r\n # print \"Collect: idx_ds=%d\" % idx_ds\r\n for summary in self.summaries:\r\n #Derive coords from the row for this summary\r\n coords=[]\r\n for sum_col in summary.columns:\r\n level = str(row[sum_col.name])\r\n if level is not None and level != \"None\": \r\n if level.find('.') != -1:\r\n # May be a float value with .0 ending to trim\r\n try:\r\n # If value is parsable as a float, and it \r\n # is an integer, represent it as an integer.\r\n flevel = float(level)\r\n # Strip a final .0 from the string.\r\n level = (\r\n str(int(flevel)) if flevel == int(flevel)\r\n else str(level))\r\n except:\r\n # Not a float, OK.\r\n pass\r\n else:\r\n level = \"\"\r\n coords.append(level)\r\n #print \"coords:\", repr(coords)\r\n #Register row data into this summary.\r\n cell = summary.cell(coords)\r\n #Future, along with ds_index, could also pass along \r\n # row's ordinal column values.\r\n # Note to self: rename accrue_row to accrue_row() \r\n # when get into eclipse env\r\n cell.entry.accrue_row(idx_ds)", "def prepare_data_for_d(data, id2motifs, generator):\n motifs = []\n labels = []\n g_s_args = []\n poss = []\n negs = []\n for i in range(data.x.size(0)):\n if np.random.rand() < 1:\n pos = random.sample(id2motifs[i], min(len(id2motifs[i]), n_sample))\n poss.append(pos)\n g_s_args.append((i, len(pos), True))\n\n\n z = generator(data.x, data.total_edge_index)\n # row, col = data.total_edge_index\n\n\n # x_j = torch.index_select(z, 0, row)\n # x_i = torch.index_select(z, 0, col)\n # one_hop = torch.einsum(\"ef,ef->ef\", x_i, x_j)\n\n negs, _ = sampling(g_s_args, z, data)\n\n # negs =[]\n # for i in range(data.x.size(0)):\n # neg=[]\n # if(len(poss[i])>0):\n # ps= torch.tensor(poss[i][0]).to(device)\n # # pdb.set_trace()\n # x_j = torch.index_select(one_hop, 0, ps)\n # x_i = torch.index_select(one_hop, 0, ps)\n # two_hop = torch.einsum(\"ef,ef->e\", x_j, x_i)\n # __, target = torch.topk(two_hop, len(poss[i]))\n # for k in range(len(poss[i])):\n # neg.append((i, row[target[k]].item(), col[target[k]].item()))\n # negs.append(neg)\n\n \n for pos, neg in zip(poss, negs):\n if len(pos) != 0 and neg is not None:\n motifs.extend(pos)\n labels.extend([1] * len(pos))\n motifs+=neg\n labels.extend([0] * len(neg))\n motifs, labels = shuffle(motifs, labels)\n pdb.set_trace()\n return motifs, labels", "def test_get_hyperflex_node_list(self):\n pass", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n\n self.node = np.zeros(self.ntotal, dtype='int32')\n #oxx, oyy, ozz, txy, pressure\n self.data = np.zeros((self.ntimes, 
self.ntotal, 5), dtype='float32')\n self.location = np.empty(self.ntotal, dtype='U8')\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def get_data_nodes(self, key=None):\n if not key:\n node = self._root\n else:\n node = self.get_node(key)\n\n if node:\n for n in node:\n if n.has_data():\n yield n", "def combineemlts(ielist=None):\n # --- The list defaults to all emlt elements\n# if ielist is None:\n # --- ielist argument is ignored, and is set to include all of them.\n ielist = range(top.nemlt+1)\n\n assert (top.emltox.min() == top.emltox.max() and\n top.emltoy.min() == top.emltoy.max() and\n top.emltot.min() == top.emltot.max() and\n top.emltop.min() == top.emltop.max() and\n top.emltlb.min() == top.emltlb.max()),\\\n \"The offsets of all of the elements must be the same\"\n\n # --- Get the full range needed for the new element\n zs = min(top.emltzs[ielist])\n ze = max(top.emltze[ielist])\n\n # --- Setup the z-grid for the new element\n dz = min(top.dzemlt[top.emltid[ielist]-1])\n nzmax = nint((ze - zs)/dz)\n dz = (ze - zs)/nzmax\n\n # --- Create arrays to hold the combined data\n es = zeros((nzmax+1,top.nesmult))\n phz = zeros((nzmax+1,top.nesmult))\n zz = linspace(zs, ze, nzmax+1)\n\n # --- Use interpolation to gather the multiple moments data into a single\n # --- array.\n for ie in ielist:\n for imult in range(top.nesmult):\n id = top.emltid[ie] - 1\n if id < 0: continue\n # --- Note that zeie is set to include all of the data and is not\n # --- necessarily equal to top.emltze[ie].\n # --- This could be problematic if there is nonzero extraneous data\n # --- in the array beyond emltze.\n zsie = top.emltzs[ie]\n zeie = zsie + top.dzemlt[id]*top.nzemltmax\n\n # --- Do the interpolation for both the amplitude and phase.\n estemp = zeros(nzmax+1)\n esold = top.esemlt[:,imult,id]*(top.emltsf[ie] + top.emltsc[ie])\n getgrid1d(nzmax+1,zz,estemp,top.nzemltmax,esold,zsie,zeie)\n phtemp = zeros(nzmax+1)\n phold = top.esemltph[:,imult,id] + top.emltph[ie]\n getgrid1d(nzmax+1,zz,phtemp,top.nzemltmax,phold,zsie,zeie)\n\n esnew = sqrt((estemp*cos(phtemp) + 
es[:,imult]*cos(phz[:,imult]))**2 +\n (estemp*sin(phtemp) + es[:,imult]*sin(phz[:,imult]))**2)\n phnew = arctan2(estemp*sin(phtemp) + es[:,imult]*sin(phz[:,imult]),\n estemp*cos(phtemp) + es[:,imult]*cos(phz[:,imult]))\n es[:,imult] = esnew\n phz[:,imult] = phnew\n\n # --- Save the _n and _v, and other values that make sense\n emlt_n = top.emlt_n.copy()\n emlt_v = top.emlt_v.copy()\n ap = top.emltap[ielist[0]]\n ax = top.emltax[ielist[0]]\n ay = top.emltay[ielist[0]]\n ph = top.emltph[ielist[0]]\n aps = min(top.emltas[ielist])\n ape = max(top.emltae[ielist])\n ox = top.emltox[ielist[0]]\n oy = top.emltoy[ielist[0]]\n ot = top.emltot[ielist[0]]\n op = top.emltop[ielist[0]]\n lb = top.emltlb[imlist[0]]\n\n # --- For now, just clear out all of the data, assuming that all emlts are\n # --- being combined.\n top.nemlt = -1\n top.nemltsets = 0\n top.nesmult = 0\n top.nzemltmax = 0\n gchange(\"Lattice\")\n gchange(\"Mult_data\")\n\n # --- Add the new, combined element\n addnewemlt(zs,ze,ap=ap,ax=ax,ay=ay,ph=ph,aps=aps,ape=ape,\n ox=ox,oy=oy,ot=ot,op=op,lb=lb,\n es=es,phz=phz,nn=emlt_n,vv=emlt_v)", "def get_all(self):\n self.data = list(self.data)\n return self.data", "def display(self):\r\n elems = [] #create a list of elements we've seen\r\n current_node = self.head\r\n while current_node.next!=None:\r\n current_node = current_node.next\r\n elems.append(current_node.data)\r\n print(elems)", "def nodes_from_pool_list(mnode):\n pool_list_data = get_pool_list(mnode)\n if pool_list_data is None:\n g.log.error(\"Unable to get Nodes from the pool list command.\")\n return None\n\n nodes = []\n for item in pool_list_data:\n nodes.append(item['hostname'])\n return nodes", "def get_worker_nodes(self):\n worker_nodes_count = input('enter number of worker nodes\\n'\n 'default [2]: ')\n default = 2\n worker_nodes_count = set_values(worker_nodes_count, default, check='integer')\n worker_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['worker_nodes'] = []\n for num in range(worker_nodes_count):\n worker_values = []\n default = 'worker-{}'.format(num)\n worker_name = input('enter the worker {} node name\\n'\n 'default [{}]: '.format(num, default))\n worker_name = set_values(worker_name, default)\n worker_ip = get_ip(node_name=worker_name, ip_type='os')\n worker_mac = get_network_device_mac(node_name=worker_name, ip_type='idrac')\n worker_values.append(worker_name)\n worker_values.append(worker_ip)\n worker_values.append(worker_mac)\n worker_node_dict_pairs = dict(zip(worker_keys, worker_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(worker_name, worker_name,\n worker_ip, worker_mac)) \n self.inventory_dict['csah']['vars']['worker_nodes'].append(worker_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_workers'] = worker_nodes_count", "def ingest_node_results(self, filename, extension=-1):\n\n # Which node is this?\n wg, node_name = utils.parse_node_filename(filename)\n #node_id = self.retrieve_node_id(wg, node_name)\n uves_node_id = self.retrieve_node_id(wg, \"UVES-{}\".format(node_name))\n giraffe_node_id = self.retrieve_node_id(wg, \"GIRAFFE-{}\".format(node_name))\n\n # Start ingesting results.\n data = Table.read(filename, hdu=extension)\n\n #default_row = {\"node_id\": node_id}\n default_row = {\"node_id\": -1}\n columns = (\n \"node_id\", \"cname\", \"filename\", \"setup\", \"snr\",\n \"vel\", \"e_vel\", \"vrot\", \"e_vrot\",\n \"teff\", \"e_teff\", \"nn_teff\", \"enn_teff\", \"nne_teff\", \"sys_err_teff\",\n \"logg\", 
\"e_logg\", \"nn_logg\", \"enn_logg\", \"nne_logg\", \"sys_err_logg\", \"lim_logg\",\n \"feh\", \"e_feh\", \"nn_feh\", \"enn_feh\", \"nne_feh\", \"sys_err_feh\",\n \"xi\", \"e_xi\", \"nn_xi\", \"enn_xi\", \"nne_xi\",\n \"mh\", \"e_mh\", \"nn_mh\", \"enn_mh\", \"nne_mh\",\n \"alpha_fe\", \"e_alpha_fe\", \"nn_alpha_fe\", \"enn_alpha_fe\", \"nne_alpha_fe\",\n \"vrad\", \"e_vrad\", \"vsini\", \"e_vsini\",\n \"peculi\", \"remark\", \"tech\")\n\n # Update formats, as necessary.\n tmp_key_format = \"{}_NEW_DTYPE\"\n for key, new_dtype in _FITS_FORMAT_ADAPTERS.items():\n\n # FUCK THESE IDIOTIC PEOPLE WHAT THE FUCK IS WRONG WITH THEM\n if node_name == \"Carmela-Elena\":\n\n if key in (\"teff\", \"e_teff\", \"logg\"):\n data[tmp_key_format.format(key.upper())] = _adapt_str_to_float(data[key.upper()])\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n elif key in (\"feh\", \"e_feh\"):\n del data[key.upper()]\n data[tmp_key_format.format(key.upper())] = np.nan * np.ones(len(data))\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n elif key in (\"tech\", \"peculi\", \"remark\"):\n del data[key.upper()]\n data[tmp_key_format.format(key.upper())] = [\"\"] * len(data)\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n\n elif node_name == \"Porto\" and key in (\"teff\", \"e_teff\", \"feh\", \"e_feh\"):\n data[tmp_key_format.format(key.upper())] = _adapt_str_to_float(data[key.upper()])\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n data[tmp_key_format.format(key.upper())] = np.array(data[key.upper()], dtype=new_dtype)\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n N = len(data)\n for i, row in enumerate(data):\n logger.info(\"Ingesting row {}/{} from node WG{}: {}\".format(i, N,\n wg, node_name))\n row_data = {}\n row_data.update(default_row)\n row_data.update(dict(zip(columns[1:], [row[c.upper()] for c in columns[1:]])))\n\n if row_data[\"setup\"].strip() == \"UVES\":\n row_data[\"node_id\"] = uves_node_id\n elif row_data[\"setup\"].strip() == \"GIRAFFE\":\n row_data[\"node_id\"] = giraffe_node_id\n else:\n raise WTFError\n\n if node_name.lower() == \"carmela-elena\":\n for key in (\"tech\", \"peculi\", \"remark\"):\n row_data[key] = str(row_data[key])\n\n use_columns = [] + list(columns)\n for k in row_data.keys():\n if isinstance(row_data[k], (bool, np.bool_)):\n del row_data[k]\n use_columns.remove(k)\n\n self.execute(\n \"INSERT INTO results ({}) VALUES ({})\".format(\n \", \".join(use_columns),\n \", \".join([\"%({})s\".format(column) for column in use_columns])),\n row_data)\n\n else:\n self.execute(\n \"INSERT INTO results ({}) VALUES ({})\".format(\n \", \".join(columns),\n \", \".join([\"%({})s\".format(column) for column in columns])),\n row_data)\n\n self.connection.commit()\n return N", "def get_nodes(self):\n with open('node_list.txt', 'r') as file:\n self.nodes = [line.rstrip('\\n') for line in file]", "def get_data_manager():\n return IncomingEdge.items", "def __iter__(self):\n for batch in self.data:\n batch_size = len(batch)\n X, e1, e2, dist1, dist2, e1_pos, e2_pos, y = list(zip(*batch))\n\n x_len = max(len(x) for x in X)\n x_ids = torch.LongTensor(batch_size, x_len).fill_(0)\n dist1_padded = torch.LongTensor(batch_size, x_len).fill_(0)\n dist2_padded = torch.LongTensor(batch_size, x_len).fill_(0)\n for i, doc in enumerate(X):\n x_ids[i, :len(doc)] = torch.LongTensor(doc)\n\n dist1_padded[i, 
:len(doc)] = torch.LongTensor(dist1[i])\n dist1_padded[i, len(doc):] = torch.LongTensor([pos(e1_pos[i][1] - idx) for idx, _ in enumerate(x_ids[i][len(doc):], start=len(doc))])\n\n dist2_padded[i, :len(doc)] = torch.LongTensor(dist2[i])\n dist2_padded[i, len(doc):] = torch.LongTensor([pos(e2_pos[i][1] - idx) for idx, _ in enumerate(x_ids[i][len(doc):], start=len(doc))])\n\n e1_tensor = torch.LongTensor(e1)\n e2_tensor = torch.LongTensor(e2)\n\n y_tensor = torch.LongTensor(y)\n\n if self.gpu:\n x_ids = x_ids.pin_memory()\n e1_tensor = e1_tensor.pin_memory()\n e2_tensor = e2_tensor.pin_memory()\n dist1_padded = dist1_padded.pin_memory()\n dist2_padded = dist2_padded.pin_memory()\n y_tensor = y_tensor.pin_memory()\n\n yield (x_ids, e1_tensor, e2_tensor, dist1_padded, dist2_padded, y_tensor)", "def get_all_metadata(self):\n return self.db.get_all_nodes()", "def mostrEmpl2(finalData): #Esta sección fue hecha por Ángel\n listaUE = []\n for elemento in finalData:\n nombre = elemento[0]\n listaUE.append(nombre) \n return listaUE", "def _setData(self):\n self._data = [ item for ministry in self.query.all() \n for item in self._getItems(ministry) ]", "def yield_metadata_chunk(tf, max_epmc_metadata, chunk_size=500):\n pub_list = []\n for index, metadata in enumerate(yield_publications_metadata(tf)):\n pub_list.append(metadata)\n if max_epmc_metadata and index + 1 >= max_epmc_metadata:\n yield pub_list\n pub_list = []\n return\n\n if len(pub_list) >= chunk_size:\n yield pub_list\n pub_list = []\n if pub_list:\n yield pub_list", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node = np.zeros(self.ntotal, dtype=idtype)\n #lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm\n self.data = np.zeros((self.ntimes, self.ntotal, 14), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def parse(self, data):\n\n for node in data:\n nodeObj = GraphiusNode(id=node['id'], value=node['value'])\n self.nodes[nodeObj.id] = nodeObj\n\n # Create links to nieghbors:\n for node in data:\n nodeObj = self.nodes[node['id']]\n for neighborId in node['children']:\n nodeObj.addNeighbor(self.nodes[neighborId])\n self.childNodes[neighborId] = self.nodes[neighborId]", "def lego_sets():\n \n \n data_test=data_specific\n\n\n\n \n \n\n \n print(data_test)\n print(\"The size of the data is: \",len(data_test))\n \n \n \n # you must replace this line and return your own list\n return data_test", "def add_nodes(self):\n for node_id in self.nodes:\n x = self.nodes[node_id][0]\n y = self.nodes[node_id][1]\n if node_id == 0:\n self.G.add_node(\"Source\", x=x, y=y, demand=0)\n self.G.add_node(\"Sink\", x=x, y=y, demand=0)\n else:\n self.G.add_node(node_id, x=x, y=y, demand=0)", "def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)", "def 
addnewemltdataset(zlen,es,esp=None,phz=None,phpz=None,nn=None,vv=None):\n # --- Make sure that the data set has the same number of multipole\n # --- components or less, or that both n and v are passed in.\n assert ((len(shape(es)) == 1) or (shape(es)[1] <= top.nesmult) or \\\n (nn is not None and vv is not None)),\\\n \"The shape of the dataset must be consistent with the data already created or both n and v must be specified\"\n\n # --- Now setup the multipole component dataset.\n top.nemltsets = top.nemltsets + 1\n\n # --- Make sure that es is a 2-D array (first dimension is data versus z,\n # --- second is number of multipole components)\n if len(shape(es)) == 1:\n es = transpose(array([es]))\n if esp is not None: esp = transpose(array([esp]))\n if phz is not None: phz = transpose(array([phz]))\n if phpz is not None: phpz = transpose(array([phpz]))\n\n # --- Make sure that the first dimension of the arrays is long enough\n if shape(es)[0] > top.nzemltmax+1: top.nzemltmax = shape(es)[0] - 1\n\n # --- Change the sizes of the arrays\n gchange(\"Mult_data\")\n\n # --- Set basic parameters\n n0 = shape(es)[0] # --- Number of data points along z\n n1 = shape(es)[1] # --- Number of multipole components\n top.nzemlt[-1] = n0 - 1\n top.dzemlt[-1] = zlen/(n0 - 1.)\n\n if nn is None and vv is None:\n assert top.nesmult > 0,'There are no emlt data sets, so the nn and vv arguments must be specified'\n # --- Assume n and v are ordered correctly and just copy the data in\n top.esemlt[:n0,:n1,-1] = es\n if esp is not None: top.esemltp[:n0,:n1,-1] = esp\n if phz is not None: top.esemltph[:n0,:n1,-1] = phz\n if phpz is not None: top.esemltphp[:n0,:n1,-1] = phpz\n\n else:\n # --- Make sure that n and v are lists\n if len(shape(nn)) == 0: nn = list([nn])\n else: nn = list(nn)\n if len(shape(vv)) == 0: vv = list([vv])\n else: vv = list(vv)\n\n # --- Make es a list of arrays\n es = list(transpose(es))\n if esp is not None: esp = list(transpose(esp))\n if phz is not None: phz = list(transpose(phz))\n if phpz is not None: phpz = list(transpose(phpz))\n\n # --- Loop over existing multipole components\n for i in range(top.nesmult):\n # --- Loop over input multipole components checking if any are the same\n for j in range(len(nn)):\n if nn[j] == top.emlt_n[i] and vv[j] == top.emlt_v[i]:\n # --- If so, then copy the data to the appropriate place and\n # --- delete the data from the lists.\n top.esemlt[:n0,i,-1] = es[j]\n if esp is not None: top.esemltp[:n0,i,-1] = esp[j]\n if phz is not None: top.esemltph[:n0,i,-1] = phz[j]\n if phpz is not None: top.esemltphp[:n0,i,-1] = phpz[j]\n del nn[j],vv[j],es[j]\n if esp is not None: del esp[j]\n if phz is not None: del phz[j]\n if phpz is not None: del phpz[j]\n break\n\n # --- Now copy in any left over data, increasing the number of multipole\n # --- components.\n if len(nn) > 0:\n ln = len(nn)\n top.nesmult = top.nesmult + ln\n gchange(\"Mult_data\")\n top.emlt_n[-ln:] = nn\n top.emlt_v[-ln:] = vv\n top.esemlt[:n0,-ln:,-1] = transpose(array(es))\n if esp is not None: top.esemltp[:n0,-ln:,-1] = transpose(array(esp))\n if phz is not None: top.esemltph[:n0,-ln:,-1] = transpose(array(phz))\n if phpz is not None: top.esemltphp[:n0,-ln:,-1] = transpose(array(phpz))\n\n return top.nemltsets", "def targets(self) -> List[List[float]]:\n if self.preload:\n return [[j[QM9.U0].item()] for j in self.data_ram] # if data_ram is a array of dicts\n # return self.data_ram[QM9.U0].numpy() # if data_ram is a dict of arrays\n else:\n return [[j[QM9.U0].item()] for j in self.data]", "def 
network_nodes(self):\n nodes = []\n for node in self.filelist:\n node_attributes = {\"type\": node.suffix}\n if node_attributes[\"type\"] == \".py\":\n node_attributes[\"size\"] = (\n log(self.get_filesize(self.sourcepath / node) + 25) * 2\n )\n node_attributes[\"color\"] = {\n \"border\": \"rgba(0,70,10,1)\",\n \"background\": \"rgba(0, 120, 20 ,1)\",\n }\n nodes.append((self.name(node), node_attributes))\n return nodes", "def populate_data(self):\r\n # Importing StationData with the standard imports causes a redundancy\r\n # problem, so it is imported here only when it is needed.\r\n from stationData import StationData\r\n # Find data requirements from all plumes.\r\n requirements = describe.PLUMES\r\n # Loop over plumes and define parameters to be used for pulling data.\r\n grib_file = pygrib.open(self.grib_file_path)\r\n for req in requirements:\r\n (plume,data_types,grid_level_type,grid_level,unused) = req\r\n selected = grib_file.select(shortName=data_types,\r\n typeOfLevel=grid_level_type,\r\n level=grid_level)\r\n for i, message in enumerate(selected):\r\n if i % 20 == 0:\r\n print '%s %s/%s Grib messages processed for %s' %\\\r\n (PRETEXT, i + 1, len(selected), req[0])\r\n for sdo in StationData.instances:\r\n if sdo.grib_i is None:\r\n StationData.populate_grid_information(message,\r\n self.config)\r\n sdo.add_data(plume,self.member_name,message)\r\n grib_file.close()\r\n return", "def partition_data(self):\n\n _header_ = self._header_ + 'partition_data(): '\n\n if self.verbose:\n print(_header_ + 'Partitioning data ...')\n\n network = self._useful_network()\n\n if self.nidx_train:\n # The only reason that allows .nidx to not be empty would be that a training Data was copied over\n # hence, the training node indices are retained and need to be excluded\n print(_header_ + 'Excluding %d training nodes transfered from training dataset ...' 
% len(self.nidx_train))\n nidx = set(self.nidx2lidx.keys()) - set(self.nidx_train)\n self.nidx_exclude += self.nidx_train\n self.nidx_train = []\n else:\n nidx = set(self.nidx2lidx.keys())\n\n for l in nidx:\n if l in network:\n if self.node_labels[l]:\n self.nidx_train.append(l)\n else:\n self.nidx_exclude.append(l)\n\n if self.verbose:\n print(_header_ + 'Found %d nodes' % len(self.nidx2lidx))\n print(' %d nodes with labels of interest' % len(self.nidx_train))\n print(' %d nodes can be used to predict' % len(self.nidx_pred))\n print(' %d nodes cannot be mapped due to lack of mappable links' % len(self.nidx_exclude))\n\n return self", "def get_nodes(self):\n return list(map(lambda x: x[0], self.__nodes))", "def _flatten(self):\n n = self.B\n idx = self.nodect - 1\n self.seq = []\n while n is not None:\n n['idx'] = idx\n self.seq.insert(0, n)\n idx -= 1\n n = n['pred']", "def get_lumped_matrices(self):\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n \n if self.frequencies is None:\n cols = 1\n else:\n cols = len(self.frequencies)\n \n list_Kdata = []\n list_Mdata = []\n list_Cdata = []\n\n i_indexes_M, j_indexes_M = [], []\n i_indexes_K, j_indexes_K = [], []\n i_indexes_C, j_indexes_C = [], []\n \n self.nodes_with_lumped_masses = []\n self.nodes_connected_to_springs = []\n self.nodes_connected_to_dampers = []\n # self.nodes_with_nodal_elastic_links = []\n\n flag_Clump = False\n\n # processing external elements by node\n for node in self.preprocessor.nodes.values():\n\n # processing mass added\n if node.there_are_lumped_stiffness:\n position = node.global_dof\n self.nodes_connected_to_springs.append(node)\n list_Kdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_lumped_stiffness, node.lumped_stiffness))\n i_indexes_K.append(position)\n j_indexes_K.append(position)\n\n # processing mass added\n if node.there_are_lumped_masses:\n position = node.global_dof\n self.nodes_with_lumped_masses.append(node)\n list_Mdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_lumped_masses, node.lumped_masses))\n i_indexes_M.append(position)\n j_indexes_M.append(position)\n\n # processing damper added\n if node.there_are_lumped_dampings:\n position = node.global_dof\n self.nodes_connected_to_dampers.append(node)\n list_Cdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_lumped_dampings, node.lumped_dampings))\n i_indexes_C.append(position)\n j_indexes_C.append(position)\n flag_Clump = True\n \n for key, cluster_data in self.preprocessor.nodes_with_elastic_link_stiffness.items():\n node = self.preprocessor.nodes[int(key.split(\"-\")[0])]\n for indexes_i, indexes_j, data, in cluster_data:\n for i in range(2):\n i_indexes_K.append(indexes_i[i])\n j_indexes_K.append(indexes_j[i])\n list_Kdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_elastic_link_stiffness, data[i]))\n \n for key, cluster_data in self.preprocessor.nodes_with_elastic_link_dampings.items():\n node = self.preprocessor.nodes[int(key.split(\"-\")[0])]\n for indexes_i, indexes_j, data, in cluster_data:\n for i in range(2):\n i_indexes_C.append(indexes_i[i])\n j_indexes_C.append(indexes_j[i])\n list_Cdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_elastic_link_dampings, data[i]))\n\n data_Klump = np.array(list_Kdata).reshape(-1, cols)\n data_Mlump = np.array(list_Mdata).reshape(-1, cols)\n data_Clump = np.array(list_Cdata).reshape(-1, cols)\n \n i_indexes_K = np.array(i_indexes_K).flatten()\n i_indexes_M = 
np.array(i_indexes_M).flatten()\n i_indexes_C = np.array(i_indexes_C).flatten()\n\n j_indexes_K = np.array(j_indexes_K).flatten()\n j_indexes_M = np.array(j_indexes_M).flatten()\n j_indexes_C = np.array(j_indexes_C).flatten()\n\n full_K = [csr_matrix((data_Klump[:,j], (i_indexes_K, j_indexes_K)), shape=[total_dof, total_dof]) for j in range(cols)]\n full_M = [csr_matrix((data_Mlump[:,j], (i_indexes_M, j_indexes_M)), shape=[total_dof, total_dof]) for j in range(cols)]\n full_C = [csr_matrix((data_Clump[:,j], (i_indexes_C, j_indexes_C)), shape=[total_dof, total_dof]) for j in range(cols)]\n \n K_lump = [sparse_matrix[self.unprescribed_indexes, :][:, self.unprescribed_indexes] for sparse_matrix in full_K]\n M_lump = [sparse_matrix[self.unprescribed_indexes, :][:, self.unprescribed_indexes] for sparse_matrix in full_M]\n C_lump = [sparse_matrix[self.unprescribed_indexes, :][:, self.unprescribed_indexes] for sparse_matrix in full_C]\n\n Kr_lump = [sparse_matrix[:, self.prescribed_indexes] for sparse_matrix in full_K]\n Mr_lump = [sparse_matrix[:, self.prescribed_indexes] for sparse_matrix in full_M]\n Cr_lump = [sparse_matrix[:, self.prescribed_indexes] for sparse_matrix in full_C]\n\n return K_lump, M_lump, C_lump, Kr_lump, Mr_lump, Cr_lump, flag_Clump", "def process(\n self, molecule_data: Tuple[str, Tuple[List[reads_pb2.Read],\n List[reads_pb2.Read]]]\n ) -> Iterable[Tuple[str, Tuple[List[reads_pb2.Read], List[reads_pb2.Read]]]]:\n\n (molecule_name, (subreads, label)) = molecule_data\n subreads_and_label = subreads + label\n # Subreads, PW and IP were already padded for subreads alone, but we need to\n # pad them again because label might be longer than previous pad length.\n subreads_and_labels_copy = copy.deepcopy(subreads_and_label)\n max_length = pad_reads(subreads_and_labels_copy)\n subreads_copy = subreads_and_labels_copy[:-1]\n label_copy = subreads_and_labels_copy[-1:]\n pad_pw_ip(subreads_copy, max_length)\n yield molecule_name, (subreads_copy, label_copy)", "def handle_nodes(result, media_id, owner_username):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tnodes = result[1]\n\t\t\tdl = []\n\t\t\tfor n in nodes:\n\t\t\t\td2 = self._make_media_path(media_id, n, owner_username)\n\t\t\t\td2.addCallback(store)\n\t\t\t\td2.addCallback(lambda _: self.clear_renders(media_id, owner_username, n))\n\t\t\t\tdl.append(d2)\n\t\t\tdList = DeferredList(dl)\n\t\t\tdList.addCallback(lambda _: \"success\")\n\t\t\treturn dList", "def get_node_heights(self):\n\n # reset lists\n self.extant_h = []\n self.not_extant_h = []\n self.not_yet_sampled_h = []\n\n # the total height of the tree is the maximum distance from root to any tip\n root = self.hosttree.get_tree_root()\n apex = self.hosttree.get_farthest_leaf()[0]\n total_height = self.hosttree.get_distance(root, apex)\n\n # iterate through all nodes in the tree\n self.host_nodes = []\n for node in self.hosttree.traverse():\n # the node's depth is its distance from the root\n depth = self.hosttree.get_distance(root, node)\n # the node's height is the difference between its depth and the total height\n height = total_height-depth\n node.add_feature('height', height) # modify TreeNode in place \n\n # we do not allow zero branch length\n # node.dist = node.dist + (np.finfo(float).eps)\n if (node.dist == 0):\n greaterThan = False\n while greaterThan == False:\n node.dist += float_info.epsilon # add a small amount\n if (node.height+node.dist<=node.height):\n greaterThan = False\n else:\n greaterThan = True\n node.height += 
node.dist\n\n if node.is_leaf():\n # keep track of leaf nodes\n if node.height == 0:\n self.extant_h.append(node)\n continue # do not append to host_nodes list\n else:\n self.not_yet_sampled_h.append(node)\n\n # store the host node and its height\n self.host_nodes.append((node.height, node))\n\n self.host_nodes.sort() # order by ascending node height", "def add_data():\n neo = NeoData(\"neo4j://neo:7687\")\n data = neo.find_all()\n return data", "def add_nodes_from(self, nodes):\n self._Impl._nodes[\"all_nodes\"] = cudf.Series(nodes)" ]
[ "0.6460555", "0.5887683", "0.56105876", "0.56016797", "0.5551757", "0.5506774", "0.5503824", "0.5480824", "0.54083604", "0.5394268", "0.5391553", "0.53643715", "0.5339283", "0.5328205", "0.5325319", "0.5300631", "0.52970463", "0.52842844", "0.52776074", "0.526741", "0.52617437", "0.52275884", "0.5188136", "0.51826525", "0.5129071", "0.5117738", "0.51122135", "0.51080143", "0.51075697", "0.51014996", "0.5100128", "0.50935465", "0.50920933", "0.50883615", "0.50805277", "0.5072428", "0.50717735", "0.50698507", "0.50658864", "0.5053406", "0.50202376", "0.5019921", "0.5016992", "0.49959108", "0.49913025", "0.49851155", "0.49788266", "0.49759853", "0.49746487", "0.4966711", "0.4964926", "0.49627522", "0.4958901", "0.49545076", "0.49544042", "0.49535444", "0.49430904", "0.493976", "0.49336755", "0.49322844", "0.49311337", "0.4924997", "0.49247736", "0.49190482", "0.49129546", "0.49127135", "0.48994607", "0.48980704", "0.48960006", "0.48918653", "0.48863664", "0.48844516", "0.48843032", "0.4881561", "0.48813796", "0.48770094", "0.48696405", "0.486918", "0.48435003", "0.4838257", "0.48334542", "0.48272014", "0.48240697", "0.48225847", "0.48141825", "0.4810668", "0.48100394", "0.48097456", "0.48075345", "0.48048222", "0.47987008", "0.47975025", "0.47972077", "0.47813216", "0.4781104", "0.47782785", "0.47782493", "0.47774574", "0.47770524", "0.47767842" ]
0.67186177
0
number_to_send has to be predefined in each node
def bcast_number_to_all(number_to_send, source_node = 0):
	from mpi import mpi_bcast, MPI_INT, MPI_COMM_WORLD, MPI_FLOAT
	import types
	if    type(number_to_send) is types.IntType:
		TMP = mpi_bcast(number_to_send, 1, MPI_INT,   source_node, MPI_COMM_WORLD)
		return int(TMP[0])
	elif  type(number_to_send) is types.FloatType:
		TMP = mpi_bcast(number_to_send, 1, MPI_FLOAT, source_node, MPI_COMM_WORLD)
		return float(TMP[0])
	else:
		print  " ERROR in bcast_number_to_all"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sendnum(self, n):\n self.sendline(str(n))", "def send(self, x):\n print x", "def send(value):\r\n return value", "def numeric(self, source, numeric, target, *args):\n\n if numeric == ERR_NICKNAMEINUSE:\n self.fire(NICK(\"{0:s}_{1:d}\".format(args[0], randint(0, 32768))))\n elif numeric in (RPL_ENDOFMOTD, ERR_NOMOTD):\n for irc_channel in self.irc_channels:\n self.fire(JOIN(irc_channel['name'], keys=irc_channel.get('key', None)))", "def sendnumber(self, number):\n\n import numbers\n\n if not isinstance(number, numbers.Number):\n raise ValueError('not a number: %r' % number)\n\n self.sendraw(str(number))", "def send_messages(_) -> int:\n return 1 << 11", "def send_messages(_) -> int:\n return 1 << 11", "def send(self, value):\n pass", "def sendto(self, data: bytes, address: Tuple) -> int:\n ...", "def numeric(self, source, numeric, *args):\n if numeric == ERR_NICKNAMEINUSE:\n self.fire(NICK(f\"{args[0]:s}_\"))\n elif numeric in (RPL_ENDOFMOTD, ERR_NOMOTD):\n self.fire(JOIN(self.ircchannel))", "def number_of_nodes(self, number_of_nodes):\n\n self._number_of_nodes = number_of_nodes", "def send_node(self) -> str:\n node = self.current_node\n MDI_Send(node, MDI_COMMAND_LENGTH, MDI_CHAR, self.comm)\n return node", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def _send(self, what, value, address, **kwargs):\n\n print('_send: please override me.')", "def number(self):", "def send_req(self):\n self.n_send_req += 1", "def getNumber():", "def addTN(self, num=1):\n self.tn += num", "def _send(self, action: List[np.ndarray]) -> None:", "def set_num(self, num):\n self.cmd_num = num", "def via_number(self, via_number):\n self._via_number = via_number", "def send(self, data):", "def nr():\n pass", "def invoke(self, msg, req):\n node = Node.create()\n node.acquire_lock()\n\n if msg.name == 'forward':\n try:\n with node.graph.as_default():\n if node.num_devices == 5:\n output, name = Model_5.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 6:\n output, name = Model_6.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 7:\n output, name = Model_7.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 8:\n output, name = Model_8.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n\n node.release_lock()\n return\n\n except Exception, e:\n node.log('Error', e.message)\n elif msg.name == 'update':\n \"\"\"update this 
node's task configuration,based on the received massage \"\"\"\n try:\n node.num_devices = req['num_devices']\n available_ip = req['available_ip']\n\n update_ip(get_file(node.num_devices), available_ip)\n load_ip(node)\n\n node.release_lock()\n return\n\n except Exception, e:\n node.log('Error', e.message)\n\n else:\n raise schema.AvroException('unexpected message:', msg.getname())", "def update_node_count(self, node, add_to_count):\r\n current_score = 0\r\n count_string = self.parser.getAttribute(node, 'gravityNodes')\r\n if count_string:\r\n current_score = int(count_string)\r\n\r\n new_score = current_score + add_to_count\r\n self.parser.setAttribute(node, \"gravityNodes\", str(new_score))", "def update_node_count(self, node, add_to_count):\n current_score = 0\n count_string = self.parser.getAttribute(node, 'gravityNodes')\n if count_string:\n current_score = int(count_string)\n\n new_score = current_score + add_to_count\n self.parser.setAttribute(node, \"gravityNodes\", str(new_score))", "def node_link_num_data(th_object, start, end, file_name):\n # close\n node_link_num(th_object, start, end, file_name, \"20\", \"10\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"20\", \"30\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"22\", \"11\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"22\", \"31\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"22\", \"10\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"20\", \"31\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"20\", \"31\", \"extracted_data/Link_number/\")\n\n # far side\n node_link_num(th_object, start, end, file_name, \"20\", \"70\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"20\", \"90\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"22\", \"71\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"20\", \"91\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"22\", \"70\", \"extracted_data/Link_number/\")\n node_link_num(th_object, start, end, file_name, \"20\", \"91s\", \"extracted_data/Link_number/\")", "def generate (self, n, ind = 0):\n\n addr = \"chirt1qcmdxwpu35mqlzxz3alc9u9ztp22edsuc5s7zzk\"\n self.generatetoaddress (self.nodes[ind], n, addr)", "def _nodeNumToId(self, num):\n if num == BROADCAST_NUM:\n return BROADCAST_ADDR\n\n try:\n return self.nodesByNum[num][\"user\"][\"id\"]\n except:\n logging.warn(\"Node not found for fromId\")\n return None", "def send_nrpn(self, param=0, value=0, ch=None):\n if isinstance(param, int):\n param_msb = (param >> 7)\n param_lsb = param\n else:\n param_msb, param_lsb = param\n\n if param_msb is not None:\n self.send_control_change(NRPN_MSB, param_msb, ch=ch)\n\n if param_lsb is not None:\n self.send_control_change(NRPN_LSB, param_lsb, ch=ch)\n\n if isinstance(value, int):\n value_msb = (value >> 7)\n value_lsb = value\n else:\n value_msb, value_lsb = value\n\n if value_msb is not None:\n self.send_control_change(DATA_ENTRY_MSB, value_msb, ch=ch)\n\n if value_lsb is not None:\n self.send_control_change(DATA_ENTRY_LSB, value_lsb, ch=ch)", "def publish_number(self):\n msg = Int64() # creating message\n msg.data = self.number_ # assigning message data\n self.publisher_.publish(msg) # publishing message data", "def 
before_send(self):", "def test_9_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(9):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def get_node_number(self, type_string):\n if type_string in self.node_dict:\n return self.node_dict[type_string]\n\n print(\"ERROR!! node type string is wrong\", type_string)\n quit()", "def set_node_variable_number(self, number):\n if number == 0: # pragma: no cover\n return\n\n self._f.dimensions[\"num_nod_var\"] = number\n\n self._f.create_variable(\n \"name_nod_var\", (\"num_nod_var\", \"len_name\"),\n dtype=\"|S1\", **self._comp_opts)\n\n for _i in range(number):\n name = \"vals_nod_var%i\" % (_i + 1)\n self._f.create_variable(\n name, (\"time_step\", \"num_nodes\"),\n dtype=self.__f_dtype, **self._comp_opts)", "def node_link_num(th_object, start, end, file_name, node1, node2, path):\n save_path = path + node1 + \"_\" + node2 + \"_vs_t.csv\"\n links_num = th_object.get_node_links_num(start, end, file_name, [node1, node2])\n with open(save_path, \"w+\") as f:\n f.write(\"Time,No_Link\\n\")\n for k in file_name:\n f.write(str(k)[11:-7] + \",\" + str(links_num[k]) + \"\\n\")\n print(node1 + \" \" + node2 + \" link number exported\")", "def increment_number_served(self, numbers):\n\t\tself.number_served += numbers", "def set_number_served(self, numbers):\n\t\tself.number_served = numbers", "def send(x, communicator, rank, tag=0):\n chainer.utils.experimental('chainermn.functions.send')\n\n if rank == communicator.rank:\n raise ValueError(\n 'rank must be different from communicator rank, '\n 'otherwise deadlock occurs')\n\n xp = backend.get_array_module(*x)\n\n # Dummy variable to retain gradient computation of send,\n # otherwise the corresponding recv will cause deadlock in backward\n # in the case where all inputs for this function does not require_grad.\n dummy_var = chainer.Variable(xp.array([], dtype=xp.float32))\n\n if isinstance(x, list) or isinstance(x, tuple):\n inputs = x + type(x)([dummy_var])\n delegate_variable = Send(\n communicator, peer_rank=rank, peer_tag=tag)(*inputs)\n else:\n delegate_variable = Send(\n communicator, peer_rank=rank, peer_tag=tag)(x, dummy_var)\n\n delegate_variable.name = 'delegate_variable'\n return delegate_variable", "def build_send(self, *args, **kwargs):\n raise NotImplementedError(\"Implement in subclass\")", "def send_data(no_of_packets):\n generate = data_buf_pb2.Send()\n generate.nop = no_of_packets\n data = generate.SerializeToString()\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create socket and send data\n s.connect((receiver_node_ip, receiver_node_port))\n s.sendall(data)\n s.close()", "def send(self, data: bytes) -> int:\n ...", "def __init__(self, number):\n self.number = number\n self.available = True", "def __send__(self,val):\n assert(len(val) == 1)\n assert(type(val) == bytes)\n v = int.from_bytes(val,byteorder=\"little\")\n if(self.verbose):\n pc.color_stdout(\"GREEN\")\n print(\">> %s\\t - %s\\t - %d\"% (hex(v),bin(v),v))\n pc.color_stdout(\"RESET\")\n self.port.write(val)", "def _send_multiple(self, what, values, address):\n\n print('_send_multiple: please override me.')", "def _set_senders_reference_20(self, val):\n self.swift_obj.SendersReference = val\n self.swift_obj.SendersReference.swiftTag = \"20\"", "async def d10(self, ctx):\n await ctx.send(random.randint(1, 10))", "def send_spam_msg(driver, name, message, n):\r\n\r\n for i in 
range(n):\r\n send_message(driver, name, message)", "def line(self, number, text, send=True):\n data = self.data\n i = number-1\n data[i] = text\n text = data[0]+'^'+data[1]\n\n if( rpi_device and send ): self.message(text)\n if( send ):\n logging.debug( \"================\" )\n logging.debug( \"%s\" % data[0] )\n logging.debug( \"%s\" % data[1] )\n logging.debug( \"================\" )", "def test_5_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(5):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def updateNodeCount(self, node, addToCount):\n currentScore = 0\n countString = node.attrib.get('gravityNodes')\n if countString:\n currentScore = int(countString)\n \n newScore = currentScore + addToCount\n node.set(\"gravityNodes\", str(newScore))", "def __init__(self, name=\"\", number=None):\n super().__init__(\"number\", name)\n if number is not None:\n self.number = number", "def _number_nodes_helper(tree: HuffmanTree, number: int = 0) -> int:\n if tree.is_leaf():\n return number - 1\n else:\n number = _number_nodes_helper(tree.left, number) + 1\n number = _number_nodes_helper(tree.right, number) + 1\n tree.number = number\n return number", "def weight(self):", "def __init__(self, node_number, all_nodes):\n self.self_host = 'localhost'\n self.base_port = 5000\n self.self_port = self.base_port + node_number\n self.other_nodes = [(self.self_host, p)\n for p in range(self.base_port + 1, self.base_port + all_nodes + 1)\n if p != self.self_port]\n self.other_nodes_len = all_nodes - 1\n self.current_node = 0", "def send_node_props(self, host_info):\n se = get_se()\n version = get_version()\n name = host_info.get_hostname()\n unique_id = '%s:Pool:%s' % (se, name)\n parent_id = \"%s:SE:%s\" % (se, se)\n\n sa = StorageElement.StorageElement()\n sar = StorageElementRecord.StorageElementRecord()\n sa.UniqueID(unique_id)\n sa.Name(name)\n sa.SE(se)\n sa.SpaceType(\"Pool\")\n sa.Implementation(XRD_NAME)\n sa.Version(version)\n sa.Status(XRD_STATUS)\n sa.ParentID(parent_id)\n sa.Timestamp(timestamp)\n sar.Timestamp(timestamp)\n sar.UniqueID(unique_id)\n sar.MeasurementType(\"raw\")\n sar.StorageType(\"disk\")\n sar.TotalSpace(1024*host_info.get_total_kb())\n sar.FreeSpace(1024*host_info.get_total_free_kb())\n sar.UsedSpace(1024*host_info.get_total_used_kb())\n Gratia.Send(sa)\n Gratia.Send(sar)", "def onRegisterNetworkNode(self):\n pass", "def num_nodes(self, ntype: str = None) -> int:\n if ntype:\n return self.num_nodes_dict[ntype]\n else:\n return self.total_number_of_nodes", "def warp():\n # Magic to get est # of nodes in graph from restful API\n resp = requests.get(NEODB[:-4] +\n \"manage/server/jmx/domain/org.neo4j/instance%3Dkernel%230%2Cname%3DPrimitive%20count?_=1342719685294\")\n rdict = resp.json()[0]\n for i in range(len(rdict['attributes'])):\n if rdict['attributes'][i]['name'] == \"NumberOfNodeIdsInUse\":\n nodeCount = rdict['attributes'][i]['value']\n \n # try 10 times to get a random node\n for i in range(10):\n r = random.randrange(0,nodeCount)\n query = q.format(\"*\")[:-6] + \" SKIP {0} LIMIT 1;\\n\".format(r)\n neoNodes, metadata = cypher.execute(G, query)\n if len(neoNodes) > 0:\n node = neoNodes[0][0]\n return node._id\n # if we can't find a random node, return node 0\n return 0", "def test_6_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(6):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % 
str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def __init__(self):\n self.number: int", "def __init__(self, owner1: 'ln.LightningNode', owner2: 'ln.LightningNode'):\n self.address = ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))\n self.owner1 = owner1\n self.owner2 = owner2\n self.total_msat = 0 # will be changed as owners deposit funds.", "def setNumWorkers(self, num):\r\n self.numWorkers = num", "def __init__(self, node):\n super().__init__(node, USB_MOTION_ID)\n self.node_callbacks = (USB_AVAILABLE_ID, USB_MOTION_ID)", "def send_one(self, target):\n # NOTE: Variable nodes in self.neighbors are in same order as in the\n # factor table tuples.\n target_index = self.neighbors.index(target)\n msg = -np.Inf * np.ones(len(target.domain))\n for comb, fvalue in self.table.items():\n s = 0\n for i, vnode in enumerate(self.neighbors):\n if vnode != target:\n s += self.received[vnode][comb[i]]\n s += fvalue\n msg[comb[target_index]] = np.logaddexp(msg[comb[target_index]], s)\n target.receive(self, msg)", "def sendtoserial(self, module, msg):\n self.send(\"sendserial/{}/{}:{}\\n\".format(self.msg_id, module, msg))\n self.msg_id += 1", "def type_number(number: float):\n # TODO: Allow leading zeros\n for char in str(number):\n actions.key(char)", "def test_7_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(7):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def _sendJunk(self, n):\n junk = ''.join(['x' for i in range(n)])\n written = 0\n while written < n:\n retval = self.s.send(junk[written:])\n self.sent += retval\n written += retval\n self.assertEquals(n, written)", "def send(self, data):\n pass", "def send_generate_info(context_tokens_tensor, context_length_tensor, tokens_to_generate, all_probs, temperature):\n # Send the sizes of the tensors\n input_info = [context_tokens_tensor.size(0), context_tokens_tensor.size(1), tokens_to_generate, all_probs, temperature]\n input_info_tensor = torch.cuda.FloatTensor(input_info)\n torch.distributed.broadcast(input_info_tensor, 0)\n\n # Send variables to all ranks \n torch.distributed.broadcast(context_length_tensor, 0)\n torch.distributed.broadcast(context_tokens_tensor, 0)", "def set_number(self, number):\n self.number = number", "def n(self, n) :\n\t\ttry :\n\t\t\tself._n = n\n\t\texcept Exception as e:\n\t\t\traise e", "def send_to_engine(self, wi):\n pass", "def _number_xpad(self):\n js_path = self._device_path.replace('-event', '')\n js_chardev = os.path.realpath(js_path)\n try:\n number_text = js_chardev.split('js')[1]\n except IndexError:\n return\n try:\n number = int(number_text)\n except ValueError:\n return\n self.__device_number = number", "def send(self, name):\n router = self.routers[name]\n for neighbour in router.neighbours:\n neighbour = self.routers[neighbour]\n neighbour.receive_routing_table(router)", "def test_4_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(4):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def node_count(self, node_count):\n\n self._node_count = node_count", "def send_rpn(self, param=0, value=0, ch=None):\n if isinstance(param, int):\n param_msb = (param >> 7)\n param_lsb = param\n else:\n param_msb, param_lsb = param\n\n if param_msb is not 
None:\n self.send_control_change(RPN_MSB, param_msb, ch=ch)\n\n if param_lsb is not None:\n self.send_control_change(RPN_LSB, param_lsb, ch=ch)\n\n if isinstance(value, int):\n value_msb = (value >> 7)\n value_lsb = value\n else:\n value_msb, value_lsb = value\n\n if value_msb is not None:\n self.send_control_change(DATA_ENTRY_MSB, value_msb, ch=ch)\n\n if value_lsb is not None:\n self.send_control_change(DATA_ENTRY_LSB, value_lsb, ch=ch)", "def bus_get_unique_node_number(self, start_number: int = 0) -> int:\n start_number = Base.check_int_param(start_number)\n return self.dss_obj.BUSI(3, start_number)", "def _make_sendrecv(self, f, fixed, **kwargs):\n return", "def __init__(self, n=1, cpu=.1, bw=10, delay=None,\n max_queue_size=None, **params):\n\n # Initialize topo\n Topo.__init__(self, **params)\n\n # Host and link configuration\n hconfig = {'cpu': cpu}\n lconfig = {'bw': bw, 'delay': delay,\n 'max_queue_size': max_queue_size }\n\n # Create the actual topology\n receiver = self.addHost('receiver')\n\n # Switch ports 1:uplink 2:hostlink 3:downlink\n uplink, hostlink, downlink = 1, 2, 3\n\n # The following template code creates a parking lot topology\n # TODO: Replace the template code to create a parking lot topology for any arbitrary N (>= 1)\n if n < 1: # network must have at least 1 host\n return -1\n\n s = [] # Python list of switches\n h = [] # Python list of hosts\n\n # dynamically add all hosts and switches to network backbone first\n for i in range(n):\n switch_name = 's%s' % (i+1)\n host_name = 'h%s' % (i+1)\n\n s.append( self.addSwitch(switch_name) ) # s[0] is switch1\n h.append( self.addHost(host_name) ) # h[0] is host1\n\n # Wire up clients\n self.addLink(h[i], s[i], port1=0, port2=hostlink, **lconfig)\n \n # link to previous switch\n if i > 0:\n self.addLink(s[i-1], s[i], port1=downlink, port2=uplink, **lconfig)\n\n \n # Wire up receiver to first switch\n self.addLink(receiver, s[0], port1=0, port2=uplink, **lconfig)\n\n '''\n # for N = 1\n # Begin: Template code\n s1 = self.addSwitch('s1')\n h1 = self.addHost('h1', **hconfig)\n\n # Wire up receiver\n self.addLink(receiver, s1, port1=0, port2=uplink, **lconfig)\n\n # Wire up clients\n self.addLink(h1, s1, port1=0, port2=hostlink, **lconfig)\n\n # Uncomment the next 8 lines to create a N = 3 parking lot topology\n s2 = self.addSwitch('s2')\n h2 = self.addHost('h2', **hconfig)\n self.addLink(s1, s2,\n port1=downlink, port2=uplink, **lconfig)\n self.addLink(h2, s2,\n port1=0, port2=hostlink, **lconfig)\n\n s3 = self.addSwitch('s3')\n h3 = self.addHost('h3', **hconfig)\n self.addLink(s2, s3,\n port1=downlink, port2=uplink, **lconfig)\n self.addLink(h3, s3,\n port1=0, port2=hostlink, **lconfig)\n \n # End: Template code\n '''", "def get_node_string(self, type_number):\n if type_number in self.node_backward_dict:\n return self.node_backward_dict[type_number]\n\n print(\"ERROR!! 
node type number is wrong\", type_number)\n quit()", "def sendto(self, name, msg):\n self.send(\"send/{}/{}:{}\".format(self.msg_id, name, msg))\n self.msg_id += 1", "def __init__(self, n):\n self.n = n", "def __init__(self, n):\n self.n = n", "def after_send(self):", "def increment_node_index(self):\n self.node_index += 1", "def __init__(self, source_node, source_gate_name, target_node, target_slot_name, weight=1):\n self.link(source_node, source_gate_name, target_node, target_slot_name, weight)", "def create_nodes(self):", "def n(self):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def reset_number_of_module_species(self, num_module_species, generation_number):\n for node in self._nodes.values():\n node.set_species_upper_bound(num_module_species, generation_number)" ]
[ "0.6436164", "0.5802865", "0.55402374", "0.5511573", "0.5425965", "0.5417966", "0.5417966", "0.5406537", "0.5385631", "0.5377031", "0.5364401", "0.5363881", "0.53416944", "0.53416944", "0.53416944", "0.53416944", "0.53416944", "0.53416944", "0.5333438", "0.5325405", "0.53246105", "0.528373", "0.527409", "0.52659595", "0.5210332", "0.5206952", "0.52028894", "0.51902884", "0.51526046", "0.5129767", "0.51138085", "0.5098759", "0.50930345", "0.50772655", "0.5070894", "0.50629157", "0.5052279", "0.50150836", "0.5002946", "0.50024605", "0.50003517", "0.49961", "0.49857363", "0.49830413", "0.49816346", "0.4976298", "0.49719512", "0.49600577", "0.4944075", "0.49332353", "0.4931794", "0.4926713", "0.48970369", "0.48963675", "0.48923624", "0.48892835", "0.4883217", "0.48816386", "0.48750606", "0.48739433", "0.4844294", "0.4843554", "0.48335025", "0.4825373", "0.482021", "0.48202026", "0.4818272", "0.48078614", "0.48047975", "0.48038316", "0.48007232", "0.47905213", "0.47851905", "0.47845793", "0.47825944", "0.4779821", "0.4773454", "0.4772976", "0.47652778", "0.47570866", "0.47559932", "0.47530994", "0.4748976", "0.4743358", "0.4737391", "0.47345215", "0.47339448", "0.47331315", "0.4725121", "0.4722738", "0.4722738", "0.47209543", "0.47201863", "0.4718901", "0.47147113", "0.47095433", "0.47074348", "0.47074348", "0.47074348", "0.47037366" ]
0.53139603
21
write headers from files in data into a disk file called filename. The filename has to be either hdf or bdb. lima list with positions in the disk files into which headers will be written, i.e., header from data[k] will be written into file number lima[k]
def write_headers(filename, data, lima):
	from utilities import file_type
	from EMAN2db import db_open_dict

	ftp = file_type(filename)
	if ftp == "bdb":
		#  For unknown reasons this does not work on Linux, but works on Mac ??? Really?
		DB = db_open_dict(filename)
		for i in range(len(lima)):
			DB.set_header(lima[i], data[i])
		DB.close()
		#for i in range(len(lima)):
		#	data[i].write_image(filename, lima[i])
	elif ftp == "hdf":
		for i in range(len(lima)):
			data[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)
	else:
		ERROR("Unacceptable file format","write_headers",1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_header(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\tDB = db_open_dict(filename)\n\t\tDB.set_header(lima, data)\n\telif ftp == \"hdf\":\n\t\tdata.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def write_to_file(data, filename):\n fimg = fits.HDUList()\n fimghdu = fits.PrimaryHDU()\n fimghdu.data = data\n fimg.append(fimghdu)\n fimg.writeto(filename, overwrite=True)\n print(' wrote output data to: ', filename)", "def _save_to_file(filename, data, start=0, header_size=None):\n if header_size is None:\n header_size = 0\n item_dtype = data.dtype\n # Open file as necessary\n opened = False\n if isinstance(filename, str):\n fd = open(filename, 'rb+')\n opened = True\n else:\n fd = filename\n # Seek to halo location and write\n offset = header_size + (start * item_dtype.itemsize)\n fd.seek(offset, os.SEEK_SET)\n data.tofile(fd)\n if opened:\n fd.close()", "def write_data(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n fh.write(str(header))\r\n fh.write(str(data) + \"\\n\")", "def _create_header_file(tensor_name, npy_data, output_path, data_linkage):\n file_path = pathlib.Path(f\"{output_path}/\" + tensor_name).resolve()\n # create header file\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\"#include <stddef.h>\\n\")\n header_file.write(\"#include <stdint.h>\\n\")\n header_file.write(\"#include <dlpack/dlpack.h>\\n\")\n header_file.write(f\"const size_t {tensor_name}_len = {npy_data.size};\\n\")\n\n _emit_data_linkage(header_file, data_linkage)\n\n header_file.write(f\"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =\")\n\n header_file.write(\"{\")\n for i in np.ndindex(npy_data.shape):\n header_file.write(f\"{npy_data[i]}, \")\n header_file.write(\"};\\n\\n\")", "def write(self, filename, data, hdr):\n pass", "def write_data_2(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n if len(header) <= 0 or len(data) <= 0:\r\n return\r\n else:\r\n fh.write(str(header + \"\\n\"))\r\n fh.write(str(data) + \"\\n\")\r\n fh.write(\"\\n\")", "def writeheader(filename, header):\n # convert string to [unsigned] byte array\n hh = np.zeros(512, dtype='uint8')\n for i, ss in enumerate(header):\n hh[i] = ord(ss)\n # write header to file\n file_arr = np.memmap(filename, dtype='uint8', mode='r+', shape=(512,))\n file_arr[:512] = hh[:]\n del file_arr\n return", "def write(filename, data, extname=None, extver=None, header=None,\n clobber=False, ignore_empty=False, units=None, table_type='binary',\n names=None, write_bitcols=False, compress=None, tile_dims=None,\n **keys):\n if keys:\n import warnings\n warnings.warn(\n \"The keyword arguments '%s' are being ignored! 
This warning \"\n \"will be an error in a future version of `fitsio`!\" % keys,\n DeprecationWarning, stacklevel=2)\n\n kwargs = {\n 'clobber': clobber,\n 'ignore_empty': ignore_empty\n }\n with FITS(filename, 'rw', **kwargs) as fits:\n fits.write(data,\n table_type=table_type,\n units=units,\n extname=extname,\n extver=extver,\n compress=compress,\n header=header,\n names=names,\n write_bitcols=write_bitcols,\n tile_dims=tile_dims)", "def edf_write(data, file_name, header_size=1024):\n # get current time\n from time import gmtime, strftime\n today = strftime('%d-%b-%Y', gmtime())\n size = np.shape(data)\n print('data size in pixels is ', size)\n nbytes = np.prod(size) * data.dtype.itemsize\n print('opening', file_name, 'for writing')\n # craft an ascii header of the appropriate size\n f = open(file_name, 'wb')\n head = '{\\n'\n head += 'HeaderID = EH:000001:000000:000000 ;\\n'\n head += 'Image = 1 ;\\n'\n head += 'ByteOrder = LowByteFirst ;\\n'\n head += 'DataType = %13s;\\n' % numpy_to_esrf_datatype(data.dtype)\n print('using data type %s' % numpy_to_esrf_datatype(data.dtype))\n head += 'Dim_1 = %4s;\\n' % size[0]\n if len(size) > 1: head += 'Dim_2 = %4s;\\n' % size[1]\n if len(size) > 2: head += 'Dim_3 = %4s;\\n' % size[2]\n head += 'Size = %9s;\\n' % nbytes\n head += 'Date = ' + today + ' ;\\n'\n for i in range(header_size - len(head) - 2):\n head += ' '\n head += '}\\n'\n f.write(head.encode('utf-8'))\n if len(data.shape) == 3:\n s = np.ravel(data.transpose(2, 1, 0)).tostring()\n elif len(data.shape) == 2:\n s = np.ravel(data.transpose(1, 0)).tostring()\n else:\n s = np.ravel(data).tostring()\n f.write(s)\n f.close()", "def tabser(filename, body, data):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n buffer = bytearray([0] * (2 ** 16))\n head.pack_into(buffer, 0, 0, int(time()), len(data), body.size, 0),\n offset = head.size\n for row in data:\n body.pack_into(buffer, offset, *row, 0)\n offset += body.size\n else:\n print(\"write %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # foot.pack_into(buffer, offset, bytes([0, 0, 0, 0]))\n with open(filename, \"wb\") as f:\n f.write(buffer)", "def make_odb_header(odbfile, dataset):\n \n header = 'headers/' + dataset + '_header.dat'\n \n if not os.path.isfile ( header ):\n print(' Creating the header file for the dataset: ', dataset )\n if dataset in ('era5_1','era5_2'):\n \n odbfile = odbfile.replace('.gz','')\n else:\n odbfile = odbfile.replace('.gz','').replace('.conv._','.conv.')\n \n rdata=subprocess.check_output([\"odb\",\"header\", odbfile ])\n \n with open( header , 'wb' ) as f:\n f.write(rdata) \n \n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n') \n \n else:\n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n')\n #print(' Done reading the existing header file for the dataset: ', dataset )\n \n columns, kinds, tdict =[] , [] , {} \n \n for r in rdata[2:-2]:\n try:\n \n if r[:6]=='Header':\n break\n else: \n columns.append(r.split('name: ')[1].split(',')[0])\n kinds.append(r.split('type: ')[1].split(',')[0])\n if kinds[-1]=='REAL':\n tdict[columns[-1]]=numpy.float32\n elif 'INTEGER' in kinds[-1] or 'BITFIELD' in kinds[-1]:\n #print(columns[-1])\n if columns[-1]=='sonde_type@conv' or columns[-1]=='station_type@conv':\n tdict[columns[-1]]=numpy.float32\n else: \n tdict[columns[-1]]=numpy.int32\n else:\n tdict[columns[-1]]=numpy.dtype('S') # dict containng column name and type\n \n except 
IndexError:\n pass \n \n \"\"\" This is done otherwise for the era5 databases (1759,1761,3188) the tdict has different length than the columns list.\n So the following call alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) breaks \"\"\" \n for t in tdict.keys():\n if t not in columns:\n #print(\"Removing non appearing fb column: \" , c) \n del tdict[t]\n \n \"\"\" These values must be removed rom the fb, since they have NULL values and it creates problem with \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) \"\"\" \n \n if dataset in [\"era5_1759\", \"era5_1761\", \"era5_3188\"]:\n remove = ['sonde_type@conv' , \"eda_spread@errstat\", \"bias_volatility@body\" , \"timeseries_index@conv\"]\n for c in remove:\n #print(\"Removing wrong fb column: \" , c)\n try:\n columns.remove(c)\n del tdict[c]\n except:\n pass\n return columns, kinds, tdict", "def write_file(self,filename):\n \n with open(filename, 'w') as f:\n tab_width = np.max([len(k) for k in self.header.keys()])\n for k,v in self.header.items():\n f.write(u'{0}:\\t{1}\\n'.format(k, v).encode('utf8').expandtabs(tab_width+2))\n np.savetxt(f, self.data, fmt ='%f %f %f %d')", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.txt\", \"w\")\n file.write(str(\"\\t\".join(hdata)) + \"\\n\")", "def write_header(indir, nb_landmark, nb_feature, mirror_factor, order_factor, feature_names=None):\n assert nb_landmark > 0\n assert os.path.exists(indir) and os.path.isdir(indir), indir + \" not found.\"\n if indir[-1] != os.sep:\n indir += os.sep\n axis = [\"x\", \"y\", \"z\"]\n header = \"ID\"\n for numb in range(1, nb_landmark + 1):\n for axe in axis:\n header += \",\" + axe + str(numb)\n if feature_names is not None:\n assert len(feature_names) == nb_feature\n header += \",\" + \",\".join(feature_names)\n else:\n for numb in range(1, nb_feature + 1):\n header += \",Feature\" + str(numb)\n header += \"\\n\"\n with open(indir + \"../landmarks.csv\", \"w\") as filep:\n filep.write(header)\n modif = \"\"\n if mirror_factor is not None:\n modif += \"_reversed\"\n if order_factor is not None:\n modif += \"_reordered\"\n if mirror_factor is not None or order_factor is not None:\n with open(indir + \"../landmarks\" + modif + \".csv\", \"w\") as filep:\n filep.write(header)", "def writeHeader( self ):\n for k in self.secondaryTargets.keys():\n fileName = self.treyGene[k] + \"-GenesinCommon.txt\" \n with open( fileName, 'w' ) as out:\n out.write(\"%s\\t%s\\t%s\\n\" %(\"Gene_trey\", \"Gene\", \"Gene_inCommon\" ))\n out.close()", "def write_data_to_file(data, filename):\n with open(filename, 'wb') as outfile:\n outfile.write(data)", "def save_header_default(filename, nhalos_per_tree):\n ntrees = len(nhalos_per_tree)\n nhalos = np.sum(nhalos_per_tree)\n dtype1 = np.dtype([('ntrees', 'i4'), ('totnhalos', 'i4')])\n x1 = np.array([(ntrees, nhalos)], dtype=dtype1)\n x2 = 
nhalos_per_tree.astype('i4')\n header_size = x1.nbytes + x2.nbytes\n # Open\n if isinstance(filename, str):\n fd = open(filename, 'wb')\n close = True\n else:\n fd = filename\n close = False\n # Write\n x1.tofile(fd)\n x2.tofile(fd)\n # Close\n if close:\n fd.close()\n return header_size", "def write_data_to_file(data, filename):\n\tif isinstance(data, pd.DataFrame):\n\t\tdata_to_print = data.values.tolist()\n\telse:\n\t\tdata_to_print = data\n\n\twith open(filename, 'w') as f:\n\t\tfor item in data_to_print:\n\t\t\tf.write(\"%s\\n\" % item)", "def writeheader(fh,colnames):\n for i in range(len(colnames)):\n fh.write('# %d %s\\n'%(i+1,colnames[i]))", "def _reportDataFile(self, dataFileName, outputFile):\n #subsequent access to the file should be open for \"append\"-ing\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\" ' +AutoGrader.Const.HEADER_COLOR2 + '\"><br>\\n------------- ' + os.path.split(dataFileName)[1] + ' -------------</font>\\n')\n f.close()", "def gp_file(data,filename,output_dir='',order = [],head = False):\n f = open(output_dir + filename + '.csv', 'w')\n f.write(str(len(order)-1) + '\\n')\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n f.closed\n\n return None", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def write_file(data, filename):\n file = open(filename, \"wb\")\n file.write(data)\n file.close()", "def save_gyre(filename, header, data):\n with open(filename, 'wt') as f:\n header_length = len(list(header[()]))\n # if header_length == 4:\n # fmt = ''.join(['%6i','%26.16E'*3,'\\n'])\n # elif header_length == 5:\n # fmt = ''.join(['%6i','%26.16E'*3,'%6i\\n'])\n # else:\n # raise ValueError(\"header should have 4 or 5 components but \"\n # \"it appears to have %i\" % header_length)\n if not 'version' in header.dtype.names:\n fmt = ''.join(['%6i','%26.16E'*3,'\\n'])\n else:\n fmt = ''.join(['%6i','%26.16E'*3,'%6i\\n'])\n\n f.writelines([fmt % tuple(header[()])])\n\n N = len(data[0])-1\n fmt = ''.join(['%6i',' %26.16E'*N,'\\n'])\n for row in data:\n f.writelines([fmt % tuple(row)])", "def write_data_to_h5(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)\n f.close()", "def add_headers(headers, out):\r\n out.write(common.to_csv_line(headers, \"efficient\"))", "def write_header(self, fd):\n fd.write(f\"BEGIN {self.name}\")\n if len(self.data_items) > 0:\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n one_based = (\n self.data_items[0].structure.type == DatumType.integer\n )\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n if len(self.data_items) > 1:\n for data_item in self.data_items[1:]:\n entry = data_item.get_file_entry(values_only=True)\n fd.write(\"%s\" % (entry.rstrip()))\n if self.get_comment().text:\n fd.write(\" \")\n self.get_comment().write(fd)\n fd.write(\"\\n\")", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n 
t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def write_hdf5(filename, data):\n \n if '.h5' in filename:\n fid = h5py.File(filename, 'w')\n else:\n filename = filename+'.h5'\n fid = h5py.File(filename, 'w')\n\n print('Writing %s...'%filename)\n\n write_hdf5_group(fid, data)\n\n fid.close()\n print('Finished writting %s.'%filename)\n return", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.html\", \"w\")\n file.write(\"<html>\\n\\t<head>\\n\\t<style>\\n\" +\n \"\\t\\t\\ttable, th, td {border: 1px solid\\n\" +\n \"\\t\\t\\tblack;border-collapse: collapse;}\" +\n \"\\n\\t</style>\\n\" +\n \"\\t</head>\\n\\t<body>\\n\\t\\t<table style=\\\"width:100%\\\">\\n\")\n file.write(\"\\t\\t\\t<tr>\\n\")\n for line in hdata:\n file.write(\n \"\\t\\t\\t\\t\\t<th>\\n\\t\\t\\t\\t\\t\\t\"\n + str(line) + \"\\n\\t\\t\\t\\t\\t</th>\\n\")\n file.write(\"\\t\\t\\t</tr>\\n\")", "def write_ldat_header(self, datapath):\n contents = {}\n contents['ldat_type'] = self.ldat_type\n contents['filenametime'] = self.filenametime\n contents['station_id'] = self.station_id\n contents['rcusetup_cmds'] = self.rcusetup_cmds\n contents['beamctl_cmds'] = self.beamctl_cmds\n contents['rspctl_cmds'] = self.rspctl_cmds\n if self.caltabinfos != []:\n contents['caltabinfos'] = self.caltabinfos\n if self.septonconf:\n contents['septonconf'] = self.septonconf\n\n if not self.isLOFARdatatype(self.ldat_type):\n raise ValueError(\"Unknown LOFAR statistic type {}.\"\\\n .format(self.ldat_type))\n xtra = ''\n if self.ldat_type == 'acc':\n xtra = '_512x192x192'\n ldat_header_filename = (self.filenametime + '_' + self.ldat_type\n + xtra + '.h')\n with open(os.path.join(datapath, ldat_header_filename), 'w') as f:\n f.write('# LCU obs settings, header file\\n')\n f.write('# Header version'+' '+self.headerversion+'\\n')\n yaml.dump(contents, f, default_flow_style=False, width=1000)", "def save_fits(data, fname):\n\tcols = fits.ColDefs(np.copy(data)) # 
This is somehow necessary.\n\ttbhdu = fits.BinTableHDU.from_columns(cols)\n\ttbhdu.writeto(fname, clobber=True)\n\t\n\treturn", "def make_header_files():\n os.makedirs(DATA_DIR) if not os.path.exists(DATA_DIR) else None\n from dkistdataratemodel.units import frame\n from dkist_data_model.generator.dataproducts.visp import CalibratedVISP\n\n \"\"\"\n Generate VISP\n \"\"\"\n visp = CalibratedVISP(end_condition=20*frame)\n\n visp_files = visp.to_fits(\"sp_5_labelled\",\n path_template=os.path.join(DATA_DIR, 'visp_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"visp.zip\"), \"w\") as myzip:\n for fname in visp_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)\n\n \"\"\"\n Generate VTF\n \"\"\"\n from dkist_data_model.generator.dataproducts.vtf import CalibratedVTF\n vtf = CalibratedVTF(end_condition=96*frame)\n\n vtf_files = vtf.to_fits(\"5d_test\",\n path_template=os.path.join(DATA_DIR, 'vtf_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"vtf.zip\"), \"w\") as myzip:\n for fname in vtf_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def write_data(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data = data, compression='gzip', compression_opts=9)\n f.close()", "def write_header(outfbfile, header_params, header):\n for hp in header_params:\n hdrval = sigproc.addto_hdr(hp, header[hp])\n outfbfile.write(hdrval)", "def write_header(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"w+\")\r\n self.file.write(self.version)\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line)\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line)\r\n print(self.body_header_line.line)", "def WriteHeaderFileForCcmModel(filename, model): \n\n ccm_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(ccm_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(ccm_model_name + \".hpp written!\\n\")", "def write_name_file(self):\n fn_path = os.path.join(self.model_ws, self.mpnamefile)\n f_nam = open(fn_path, 'w')\n f_nam.write('%s\\n' % (self.heading))\n if self.mpbas_file is not None:\n f_nam.write('%s %3i %s\\n' % ('MPBAS', 86, self.mpbas_file))\n if self.dis_file is not None:\n f_nam.write('%s %3i %s\\n' % ('DIS', 
self.dis_unit, self.dis_file))\n if self.head_file is not None:\n f_nam.write('%s %3i %s\\n' % ('HEAD', 88, self.head_file))\n if self.budget_file is not None:\n f_nam.write('%s %3i %s\\n' % ('BUDGET', 89, self.budget_file))\n for u, f in zip(self.external_units, self.external_fnames):\n f_nam.write('DATA {0:3d} '.format(u) + f + '\\n')\n f_nam.close()", "def WriteHeaderFileForSrnModel(filename, model): \n\n srn_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(srn_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(srn_model_name + \".hpp written!\\n\")", "def write_headerfile(self, header_file, header):\n f = open(header_file, 'w')\n for iii in range(len(header)):\n outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\\n'\n f.write(outline)\n f.close()", "def write_to_file(output, data, datafields):\n if (len(data) != len(datafields)):\n print \"Error! number of data fields != number of headers!\"\n print 'len: ', len(data), len(datafields)\n print 'shape: ', np.shape(data), np.shape(datafields)\n\n ofile = open(output,'w')\n ofile.write(\"# g(r) in the xy-plane from 2Drdf.py\\n\")\n header = \"# chunk \"\n for element in datafields:\n header += element + \" \"\n\n header = header + '\\n'\n ofile.write(header)\n \n it = 0\n for i in xrange(len(data[0])):\n line = str(it) + \" \"\n it += 1\n for j in xrange(len(data)):\n line += str(float(data[j][i])) + \" \"\n line += \"\\n\"\n ofile.write(line)\n \n ofile.close()\n print \"Finished writing file: \", output", "def write_eneheader(self,filename,replica):\n \n fheader = open(filename,'w')\n fheader.write('E_pot\\tE_rest(D)\\tD\\tcontact_state\\ttemp\\n')\n fheader.write('# Energy units: Joules/mol\\n')\n fheader.write('# Restrained contact state: ' + repr(replica.mc.restraint.contacts) + '\\n')\n fheader.write('# kspring: '+str(replica.mc.restraint.kspring) + '\\n')\n\tfheader.close()", "def WriteIndexHeader(indexFileHeaderText, formatindex, fpindex):#{{{\n if formatindex == FORMAT_TEXT:\n for s in indexFileHeaderText:\n print(s, file=fpindex)\n else:\n dumpedtext='\\n'.join(s for s in indexFileHeaderText)\n vI = array('I')\n vI.append(len(dumpedtext))\n vI.tofile(fpindex)\n fpindex.write(dumpedtext)", "def write_to_file(filename, data):\n with open(filename, \"a\") as file:\n file.writelines(data)", "def write(self, data, filename=None):\n if not filename:\n filename = self.output_csv\n\n with open(filename, \"w\") as _file:\n writer = csv.writer(_file)\n\n writer.writerow(list(_ for _ in self.header()))\n writer.writerows(data)", "def extract_tags_to_file(data, filename):\n data.sort(key=lambda tag: tag[1], reverse=True)\n with open(filename, 'w') as f:\n # first four lines for metadata\n 
f.write(filename + '\\n')\n f.write('tags: %d\\n\\n\\n' % len(data))\n for tag in data:\n f.write('%s\\t\\t\\t%d\\n' % (tag[0], tag[1]))", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def write_to_file(filename, data):\n with open(filename, \"a\") as file:\n file.writelines(data)", "def write_data(self, filename,\n columns=('Q', 'R', 'dR'),\n header=None):\n if header is None:\n header = \"# %s\\n\"%' '.join(columns)\n with open(filename, 'wb') as fid:\n fid.write(asbytes(header))\n data = np.vstack([getattr(self, c) for c in columns])\n np.savetxt(fid, data.T)", "def write_sff_header(header, fh, num=None):\r\n\r\n lines = [\"Common Header:\"]\r\n if (num is not None):\r\n header[\"# of Flows\"] = num\r\n\r\n lines.extend([\" %s:\\t%s\" % (param, header[param])\r\n for param in header])\r\n fh.write(\"\\n\".join(lines) + \"\\n\\n\")", "def file_creator(title_list):\n for file_name in title_list: #title names are retrieved out of genID.txt\n with open (\"nuc_variant_calls/\"+file_name.strip()+\".var\",'w') as x:\n x.write(\"Feature type\\tAlignment length\\tIdentical nucleotides\\tIndel count\\n\") #Table headers.", "def write_flt_file(filename, data, dsize):\n binfile = open(filename,'wb')\n\n dsize = numpy.array(dsize)\n dsize[-1] = data.shape[0]\n\n header = [len(dsize)] # dimension\n header.extend(list(dsize)) # size\n header.append(4) # data type: float\n header.append(dsize.prod()) # total length of data\n\n a = array.array('i')\n a.fromlist(header)\n if is_little_endian():\n a.byteswap()\n\n a.tofile(binfile)\n\n a = array.array('f')\n for o in data:\n a.fromlist(list(o))\n if is_little_endian():\n a.byteswap()\n a.tofile(binfile)\n binfile.close()", "def process_header_data(spark, input_dir, output):\n\theader = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__header_*__*.csv\") \\\n\t\t.select(*header_cols) \\\n\t\t.where(col('identifier').isNotNull())\n\n\tbill = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__billgen_*__*.csv\") \\\n\t\t.select(*bill_cols)\n\n\theader_full = header.join(bill, ['identifier'], how='left')\n\n\theader_full.repartition(1).write.mode('overwrite').format(\"csv\") \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.save(f\"{output}/header/\")", "def print_header(filename):\n\n date_list = filename[0:10].split('_')\n # Hint: CWB Metadata cannot contain dashes -\n name = 'id=\"{}\"'.format(filename[0:-4].replace('-', '_'))\n date = 'date=\"{}\"'.format('_'.join(date_list))\n year = 
'year=\"{}\"'.format(date_list[0])\n month = 'month=\"{}\"'.format(date_list[1])\n day = 'day=\"{}\"'.format(date_list[2])\n\n header = '<text {} {} {} {} {}>'.format(name, date, year, month, day)\n\n print(header)", "def write_header(self, name, type, level=None, is_aux_number=None):\n names = []\n types = []\n levels = []\n is_aux_numbers = []\n for index in xrange(len(name)):\n names.append(c_str(name[index]))\n types.append(_NP_2_DTYPE[type[index]])\n levels.append(0 if level is None else level[index])\n is_aux_numbers.append(0 if is_aux_number is None else is_aux_number[index])\n check_call(LIB.HPPS_RecordIOWriteHeader(self.handle,\n len(names),\n c_array(ctypes.c_char_p, names),\n c_array(ctypes.c_int, types),\n c_array(ctypes.c_int, levels),\n c_array(ctypes.c_int, is_aux_numbers)))", "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()", "def write_header_in_gz_file(self):\r\n if self.arguments['--out']:\r\n self.file = gzip.open(self.arguments['--out'] + '.gz', \"w+b\")\r\n self.file.write(self.version.encode('utf-8'))\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line.encode('utf-8'))\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line.encode('utf-8'))", "def write(data: orm.Data, filename: str) -> None:\n save(to_bands_inspect(data), hdf5_file=filename)", "def generate_output_file(data, extension, headers):\n output_data = _replace_boolean(data)\n output_name = _generate_output_name(extension)\n with open(output_name, 'a', newline='') as file:\n _file_writer(file, extension, output_data, headers)", "def _write_header(self, out_handle):\n out_handle.write(\"##gff-version 3\\n\")", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.csv\", \"w\")\n file.write(str(\",\".join(hdata)) + \"\\n\")", "def writefits(self,filename, z,lmu):\n t = Table([z,self.ebl_array(z,lmu)], names = ('REDSHIFT', 'EBL_DENS'))\n t2 = Table()\n t2['WAVELENGTH'] = Column(lmu, unit = 'micrometer')\n\n hdulist = fits.HDUList([fits.PrimaryHDU(),fits.table_to_hdu(t),fits.table_to_hdu(t2)])\n\n hdulist[1].name = 'NUINU_VS_Z'\n hdulist[2].name = 'WAVELENGTHS'\n\n hdulist.writeto(filename, overwrite = True)\n return", "def alter_journal_header(self, qls_dir, queue_name, partition=None, data_size=None):\n #print(os.listdir(qls_dir))\n jfh = None\n try:\n jrnl2_dir = os.path.join(qls_dir, 'jrnl2')\n queue_list = os.listdir(jrnl2_dir)\n if queue_name in queue_list:\n queue_dir = os.path.join(jrnl2_dir, queue_name)\n jrnl_link_list = os.listdir(queue_dir)\n if len(jrnl_link_list) > 0:\n jrnl_link = os.path.join(queue_dir, jrnl_link_list[0])\n jrnl_file = os.readlink(jrnl_link)\n if os.path.getsize(jrnl_file) == 0x201000:\n jfh = open(jrnl_file, 'r+b')\n file_header = jfh.read(72) # Read 72-byte file header\n hdr_list = list(struct.unpack('IHHQQHHIQQQQQ', file_header))\n if partition:\n hdr_list[6] = partition\n if data_size:\n hdr_list[8] = data_size\n file_hdr = struct.pack('IHHQQHHIQQQQQ', *hdr_list)\n jfh.seek(0)\n jfh.write(file_hdr)\n else:\n self.fail('Journal file is invalid size: 
0x%x bytes, expected 0x201000 bytes' % os.path.getsize(jrnl_file))\n else:\n self.fail('Queue \"%s\" is empty')\n else:\n self.fail('Queue \"%s\" not found in directory %s, dir found: %s' % (queue_name, jrnl2_dir, queue_list))\n except (IOError, OSError) as e:\n self.fail(e)\n finally:\n if jfh and not jfh.closed:\n jfh.close()", "def add_header(header, filename, i):\n with open(filename, 'r+') as f:\n content = f.readlines()\n content[0] = header\n f.seek(0,0)\n f.write(f'<!-- Generated with XMLGenerator.py {__ver__} | {get_app_name(i)} -->\\n')\n f.writelines(content)", "def write(self, filename, data):\n raise NotImplementedError", "def write_header(output_file, line, file_headers):\n output_file.write('%s\\t' % line)\n for index, header in enumerate(file_headers):\n output_file.write(header.strip())\n if index < (len(file_headers) - 1):\n output_file.write('\\t')\n output_file.write('\\n')", "def print_header(fitsfile, ext=0, ofileh=sys.stdout):\n\n hdr = fitsio.read_header(fitsfile, ext=ext)\n ofileh.write(f\"{hdr}\")\n ofileh.write(\"\\n\")", "def write_file(self):\r\n # -open file for writing\r\n f_fbob = open(self.fn_path, 'w')\r\n\r\n # -write header\r\n f_fbob.write('%s\\n' % (self.heading))\r\n\r\n # -write sections 1 & 2 : NOTE- what about NOPRINT?\r\n f_fbob.write('%10i%10i%10i%10i\\n' % (self.nqfb, self.nqcfb,\r\n self.nqtfb, self.iufbobsv))\r\n f_fbob.write('%10e\\n' % (self.tomultfb)) # check format\r\n\r\n # -write sections 3-5 looping through observations groups\r\n c = 0\r\n for i in range(self.nqfb):\r\n # while (i < self.nqfb):\r\n # write section 3\r\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\r\n self.nqclfb[i]))\r\n\r\n # Loop through observation times for the groups\r\n for j in range(self.nqobfb[i]):\r\n # -write section 4\r\n f_fbob.write(\r\n '{}{:10d}{:10.4g}{}{:10.4g}\\n'.format(self.obsnam[c],\r\n self.irefsp[c],\r\n self.toffset[c], ' ',\r\n self.flwobs[c]))\r\n c += 1 # index variable\r\n\r\n # -write section 5 - NOTE- need to adjust factor for muliple obs same cell\r\n for j in range(abs(self.nqclfb[i])):\r\n if self.nqclfb[\r\n i] < 0: # set factor to 1.0 for all cells in group\r\n self.factor[i, :] = 1.0\r\n f_fbob.write('{:10d}{:10d}{:10d}{}{:10f}\\n'\r\n .format(self.layer[i, j], (self.row[i, j]),\r\n self.column[i, j],\r\n ' ', self.factor[\r\n i, j])) # note- is 10f good enough here?\r\n\r\n f_fbob.close()\r\n #\r\n # swm: BEGIN hack for writing standard file\r\n sfname = self.fn_path # swm:hack\r\n sfname += '_ins' # swm: hack\r\n # write header\r\n f_ins = open(sfname, 'w') # swm: hack for standard file\r\n f_ins.write('jif @\\n') # swm: hack for standard file\r\n f_ins.write('StandardFile 0 1 %s\\n' % (\r\n self.nqtfb)) # swm: hack for standard file\r\n for i in range(0, self.nqtfb):\r\n f_ins.write(\r\n '{}\\n'.format(self.obsnam[i])) # swm: hack for standard file\r\n\r\n f_ins.close()\r\n # swm: END hack for writing standard file\r\n\r\n return", "def add_header(in_file, file_type):\n\n if file_type == 'bowtie':\n header = \"Read name\\t\" + \"Reference strand\\t\" + \"Name of reference sequence\\t\" \\\n + \"Position alignment occurs\\t\" + \"Read sequence\\t\" + \"Read qualities\\t\" \\\n + \"Ceiling\\t\" + \"Mismatch descriptors\\n\"\n else:\n header = ''\n\n # Temp file for final results including header\n temp_out = tempfile.mkstemp()\n f_in = open(in_file, 'r')\n results = f_in.read()\n f_out = open(temp_out[1] + '.txt', 'w')\n f_out.write(header)\n f_out.write(results)\n\n f_in.close()\n f_out.close()\n return temp_out[1] + 
'.txt'", "def generate_data(self, file_name, data, header=None):\n with open(f'{self.path_file}/{file_name}.csv', 'w') as csvfile:\n if header:\n csvfile.write(header)\n csvfile.writelines(data)\n return True", "def writeDataCSV(data,outpath,outfile,out_head=None,message='data'):\n if (out_head is not None):\n #nhead = out_head.count(',') + 1\n nhead = len(out_head.split(',')) # Split header at every comma\n if (data.shape[1] != nhead):\n print('Warning: No. of fields does not match number of headings in', \n 'output file:',outfile+'.csv')\n print('No. fields =',data.shape[1],', No. headings =',nhead)\n filename = join(outpath, outfile + '.csv')\n print('Saving',message,'in file:',filename)\n np.savetxt(filename,data,delimiter=',',header=out_head) \n return None", "def export_mlab_zone_header(output, header, options):\n headerdata = header.read()\n headerdata = headerdata % options.__dict__\n output.write(headerdata)", "def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n writer, data_file, label_file)", "def WriteFile(fname, data):\n #self._out.Info(\"Write file '%s' size %d (%#0x)\" %\n #(fname, len(data), len(data)))\n with open(Filename(fname), 'wb') as fd:\n fd.write(data)", "def _writeRecord(self, path, name, data):\n file_path = os.path.join(path, name)\n with open(file_path, 'w') as f:\n for item in data:\n f.write(str(item)+'\\t')\n f.write('\\n')", "def write_data():\n\n data_location = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", DATA_DIR))\n\n sbi_file_name = os.path.join(data_location, SBI_FILE)\n\n sbi = SbiInfo(sbi_file_name)\n\n # the test file is stored in the same directory as the script\n test_file = os.path.splitext(os.path.join(os.path.dirname(__file__), SBI_FILE))[0] + \".pkl\"\n _logger.info(\"Writing header object to {}\".format(os.path.join(os.path.dirname(__file__),\n test_file)))\n sbi.data.to_pickle(test_file)", "def write_header(self, *, version=3.01, file_type='O: Observation', satellite_type='M: Mixed GNSS',\n run_by='GPSLiDAR', organization='CCAR', observer='Adam Dodge', agency='CCAR', receiver_num='1',\n receiver_type='GENERIC_P1', receiver_vers='1.0.0', antenna_number=1, antenna_type='RTK2-F9P',\n delta_pos=[0,0,0]):\n markerstr = 'GPS LiDAR System at ' + self.longname\n if not os.path.isfile(self.fname):\n tstr = self.t.strftime('%Y%m%d %H%M%S')\n # TODO: Fix header (not working in readers)\n r = 6371000 + self.alt\n x = r * np.cos(self.lat * np.pi/180) * np.cos(self.lon * np.pi/180)\n y = r * np.cos(self.lat * np.pi/180) * np.sin(self.lon * np.pi/180)\n z = r * np.sin(self.lat * np.pi/180)\n header = f'{version:>9.2f}{\" \":<11s}{file_type:<20s}{satellite_type:<20s}{\"RINEX VERSION / TYPE\":<20s}\\n' + \\\n f'{run_by:<20s}{organization:<20s}{tstr:<16s}UTC {\"PGM / RUN BY / DATE\":<20s}\\n' + \\\n f'{markerstr:<60}{\"MARKER NAME\":<20s}\\n' + \\\n f'{self.station:<60}{\"MARKER NUMBER\":<20s}\\n' + \\\n f'{\"GEODETIC\":<20s}{\" \":40s}{\"MARKER TYPE\":<20s}\\n' + \\\n f'{observer:<20}{agency:<40}{\"OBSERVER / AGENCY\":<20s}\\n' + \\\n f'{receiver_num:<20}{receiver_type:<20}{receiver_vers:<20}{\"REC # / TYPE / VERS\":<20s}\\n' 
+ \\\n f'{antenna_number:<20}{antenna_type:<40s}{\"ANT # / TYPE\":<20s}\\n' + \\\n f'{x:14.4f}{y:>14.4f}{z:>14.4f}{\" \":18s}{\"APPROX POSITION XYZ\":<20s}\\n' + \\\n f'{delta_pos[0]:14.4f}{delta_pos[1]:>14.4f}{delta_pos[2]:>14.4f}{\" \":18s}{\"ANTENNA: DELTA H/E/N\":<20s}\\n' + \\\n f'G {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'R {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'E {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'S {8:<3d} C1 L1 D1 S1 C5 L5 D5 S5 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'{\"DBHZ\":<60s}{\"SIGNAL STRENGTH UNIT\":<20s}\\n' + \\\n f'{self.t.year:>6d}{self.t.month:>6d}{self.t.day:>6d}{self.t.hour:>6d}{self.t.minute:>6d}' + \\\n f'{self.t.second:>13.7f} UTC{\" \":<9s}{\"TIME OF FIRST OBS\":<20s}\\n' + \\\n f' 0{\" \":54s}{\"RCV CLOCK OFFS APPL\":<20s}\\n' + \\\n f'G{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'R{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'E{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'S{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'{self.leapS:>6d}{\" \":>54s}{\"LEAP SECONDS\":<20s}\\n' + \\\n f'{\" \":>60s}{\"END OF HEADER\":<20s}\\n'\n\n try:\n with open(self.fname, 'w') as f:\n f.write(header)\n except FileNotFoundError:\n print('Data directory is bad. Try again.')\n sys.exit(0)", "def _write(fdesc, data):\n while data:\n count = os.write(fdesc, data)\n data = data[count:]", "def write_file(data,file_name):\r\n\twith open(file_name,'wb') as new_csv_file:\r\n\t\twrtr = writer(new_csv_file)\r\n\t\tfor row in data:\r\n\t\t\twrtr.writerow(row)", "def _write_all_headers(unit, fobj):\n\n now = datetime.datetime.now()\n if unit.jump_speed:\n jump_speed = '%.2f' % unit.jump_speed\n else:\n jump_speed = None\n\n # Case and order is significant.\n header_map = (\n ('Name', unit.name),\n ('Reference', unit.reference),\n ('Type', unit.unit_type),\n ('Unit_Era', unit.unit_era),\n ('Unit_TRO', unit.unit_tro),\n ('Move_Type', unit.unit_move_type),\n ('Tons', unit.weight),\n ('Comment', \"Saved by: btmux_maplib_io(Python) at %s\" % now.ctime()),\n ('Computer', 4),\n ('Radio', 5),\n ('Heat_Sinks', unit.heatsink_total),\n ('Mech_BV', unit.battle_value),\n ('Cargo_Space', unit.cargo_space),\n ('Max_Suits', unit.battlesuit_total),\n ('Max_Speed', '%.2f' % unit.max_speed),\n ('Jump_Speed', jump_speed),\n ('Specials', ' '.join(list(unit.specials))),\n )\n\n for header_name, header_value in header_map:\n if not header_value:\n continue\n if isinstance(header_value, list):\n header_value = ' '.join(header_value)\n header_str = \"{header_name:<16} {{ {header_value} }}\\n\".format(\n header_name=header_name, header_value=header_value)\n fobj.write(header_str)", "def output_data(data, filename):\n if filename:\n with open(filename, 'w') as f:\n json.dump(data, f, indent=4)\n else:\n print(json.dumps(table_data, indent=4))", "def put_header(file,text,comment=1):\n if len(text)==0: return\n if text[0]!='#' and comment: text='#'+text\n if text[-1]!='\\n':text=text+'\\n'\n buffer=text+open(file).read()\n open(file,'w').write(buffer)", "def write_file(filename, data):\n file = open(filename, \"a\")\n file.write(data)\n file.close()", "def outTxt(data, outPath, fileName):\n\n with open(outPath+fileName, \"wb\") as f:\n f.write(\"index,link,name,rating,review,price,category,neighborhood,address,phone,feedback\\n\")\n for record in data:\n f.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % \\\n 
(record[0],record[1],record[2],record[3],record[4],record[5],record[6],\\\n record[7],record[8],record[9],record[10]))", "def __gen_header( files: List[str] ) -> bytes:\n\n number_of_files = len( files )\n\n LOGGER( \"info\", \"Generating header...\" )\n\n # calculate sizes\n stringTable = '\\x00'.join([os.path.basename(file) for file in files])\n headerSize = 0x10 + (number_of_files)*0x18 + len(stringTable)\n remainder = 0x10 - headerSize%0x10\n\n # add padding to a multible of 0x10\n headerSize += remainder\n \n # get file information\n fileSizes = [os.path.getsize(file) for file in files]\n fileOffsets = [sum(fileSizes[:n]) for n in range(number_of_files)]\n \n # string table calculations\n fileNamesLengths = [len(os.path.basename(file))+1 for file in files] # +1 for the \\x00 separator\n stringTableOffsets = [sum(fileNamesLengths[:n]) for n in range(number_of_files)]\n \n\n # assemble header\n\n header = b'PFS0'\n header += pk('<I', number_of_files)\n header += pk('<I', len(stringTable)+remainder)\n header += b'\\x00\\x00\\x00\\x00'\n \n # add file info\n for n in range(number_of_files):\n header += pk('<Q', fileOffsets[n])\n header += pk('<Q', fileSizes[n])\n header += pk('<I', stringTableOffsets[n])\n header += b'\\x00\\x00\\x00\\x00'\n header += stringTable.encode()\n header += remainder * b'\\x00'\n \n LOGGER( \"info\", \"header successfully created.\" )\n\n return header", "def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 
'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n events_filename = Path(indir) / obs.filename('events', format=informat)\n try:\n table = Table.read(str(events_filename), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \" + str(events_filename)\n continue\n if table.meta[\"OBS_ID\"]!=obs.obs_id:\n continue\n # for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n # for filetype in ['events']:\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_table']:\n filename = Path(indir) / obs.filename(filetype, format=informat)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n elif filetype in ('psf_table'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(\n os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(indir + \"/\" + str(outfile)))\n table.write(indir + \"/\" + str(outfile), overwrite=True)\n # add hdu 
name\n hdulist = fits.open(indir + \"/\" + str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n testfile=obs.out_filename(\"events\", format=informat, dir=indir)\n try:\n table = Table.read(str(testfile), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \"+str(filename)\n continue\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n #for filetype in ['events']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n filename = obs.out_filename(filetype, format=informat, dir=indir)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(outfile))\n table.write(str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def write_file(path, data):\n result = []\n\n for word in sorted(data.keys()):\n result.append('%s %s\\n' % (word, \" \".join([str(i) for i in sorted(data[word])])))\n\n output = open(path, 'w')\n output.write(\"\".join(result))\n output.close()", "def write_voldata_to_mgh_file(mgh_file_name, vol_data, affine=None, header=None):\n if header is None:\n header = fsmgh.MGHHeader()\n image = fsmgh.MGHImage(vol_data, affine, header=header)\n image.to_filename(mgh_file_name)", "def generate_headerfile(template, n_division=10000, df=6, start_chi=25, 
filepath=\"Chi2PLookup.h\", verbose=False):\n divisor = \"const int Chi2PLookup::divisor = {};\".format(n_division)\n\n names = []\n cutoff = []\n p_values_arrays = []\n degrees_of_freedom = range(1, df+1)\n\n if verbose:\n print(\"Generating p-value arrays...\")\n print(\" df={}\".format(df))\n print(\" precision={}\".format(n_division))\n\n for df in degrees_of_freedom:\n var_name = \"pValues_{}\".format(df)\n names.append(var_name)\n max_chi = max_chi_value(df=df, start_chi=start_chi)\n cutoff.append(max_chi)\n n_elements = max_chi * n_division\n\n chi_values = (val / n_division for val in range(0, n_elements + 1))\n p_values = (str(1 - chi2.cdf(val, df)) for val in chi_values)\n\n if verbose:\n print(\"\\tAdding p-values array to template for degree of freedom = {} ...\".format(df))\n\n p_values_arrays.append(\"double {}[] = {{{}}};\".format(var_name, \", \".join(p_values)))\n\n cutoff_array = \"const int Chi2PLookup::cutoff[] = {{{}}};\".format(\", \".join([str(i) for i in cutoff]))\n p_values_array_of_arrays = \"const double * Chi2PLookup::pValues[] = {{{}}};\\n\".format(\", \".join(names))\n\n template = template.format(divisor, cutoff_array, \"\\n\".join(p_values_arrays), p_values_array_of_arrays)\n\n if verbose:\n print(\"Saving file to: {}\".format(os.path.abspath(filepath)))\n\n with open(filepath, \"w\") as outfile:\n outfile.write(template)\n\n return template", "def write_header(fpath, header):\n\n with open(fpath, 'r+') as f:\n content = f.read()\n # check if there is a shebang and encoding\n if content[:45] == '#!/usr/bin/env python\\n# -*- coding: utf-8 -*-':\n f.seek(46, 0)\n f.write('\\n' + header + content[46:])\n # check if there is only a shebang\n elif content[:21] == '#!/usr/bin/env python':\n f.seek(22, 0)\n f.write('\\n' + header + content[22:])\n # no shebang or encoding\n else:\n f.seek(0, 0)\n f.write(header + content)", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n 
h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, 
count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? 
ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 
1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict", "def write_blob(blob, header_data, path, levels):\n if path is None:\n path = \"\"\n\n mkdir(os.path.join(\"file\", path))\n mkdir(os.path.join(\"raw\", path))\n\n if blob.is_binary:\n content = None\n highlighted = None\n rendered = None\n nlines = 0\n else:\n content = blob.data.decode()\n nlines = len(content.strip().split(\"\\n\"))\n\n lexer = pick_lexer(blob.name, content)\n highlighted = highlight(content, lexer)\n\n if isinstance(lexer, lexers.MarkdownLexer):\n rendered = markdown.markdown(\n content,\n extensions=[\n \"extra\",\n \"admonition\",\n \"codehilite\",\n \"legacy_attrs\",\n \"legacy_em\",\n \"meta\",\n # \"nl2br\",\n \"sane_lists\",\n \"smarty\",\n \"toc\",\n \"wikilinks\",\n ],\n )\n else:\n rendered = None\n\n data = {\n \"size\": blob.size,\n \"name\": blob.name,\n \"mode\": format_filemode(blob.filemode),\n \"full_name\": os.path.join(path, blob.name),\n \"binary\": blob.is_binary,\n \"nlines\": nlines,\n }\n\n write_output(\n template=\"file.html\",\n outfile=f\"file/{path}/{blob.name}.html\",\n content=highlighted,\n rendered=rendered,\n name=blob.name,\n header=header_data,\n rootpath=\"/\".join([\"..\"] * levels),\n file=data,\n )\n\n raw_path = os.path.join(\"raw\", path, blob.name)\n with open(raw_path, \"wb\") as out_fh:\n out_fh.write(blob.data)\n\n return data", "def append2hdr(self, keyword=None, value=None, ext=False):\n if keyword is not None:\n if ext:\n self.hdr_ext[keyword] = value\n self.write_headerfile(self.hdrfile_ext, self.hdr_ext)\n else:\n self.hdr[keyword] = value\n self.write_headerfile(self.hdrfile, self.hdr)" ]
[ "0.81443864", "0.65365857", "0.6521095", "0.642603", "0.6422746", "0.6345552", "0.63141286", "0.622469", "0.6156558", "0.6150772", "0.6104141", "0.60294604", "0.5993755", "0.5916143", "0.58797914", "0.58024967", "0.57776046", "0.57631105", "0.5731549", "0.57286596", "0.57176596", "0.57096976", "0.56874543", "0.56874293", "0.56824505", "0.5671921", "0.5641608", "0.5618318", "0.56097484", "0.5597597", "0.55955255", "0.5589541", "0.5571753", "0.5567484", "0.5557877", "0.55463773", "0.552839", "0.5524669", "0.55235785", "0.55207634", "0.55166817", "0.5501269", "0.5497975", "0.5496696", "0.548964", "0.54892313", "0.5483405", "0.54800785", "0.5472273", "0.5471437", "0.54704684", "0.546177", "0.54271364", "0.54220533", "0.54171556", "0.5404422", "0.53951985", "0.5383463", "0.53758514", "0.53728485", "0.53723496", "0.5368722", "0.53670096", "0.53626776", "0.53505594", "0.5341537", "0.53365177", "0.5332923", "0.53297746", "0.53291655", "0.53276515", "0.53249824", "0.53162736", "0.5315435", "0.5310632", "0.5309667", "0.5306755", "0.53052837", "0.52969784", "0.5296845", "0.52865857", "0.5286495", "0.5283747", "0.52826726", "0.528134", "0.5280582", "0.5280167", "0.5280108", "0.5278901", "0.52719325", "0.52718616", "0.5271085", "0.5266222", "0.5260086", "0.5257378", "0.5256559", "0.5248688", "0.5245682", "0.5239842", "0.5233972" ]
0.8396805
0
Write the header from a single image (data) into a disk file called filename. The filename has to be either hdf or bdb. lima is the position in the disk file into which the header will be written, i.e., the header from data will be written into image number lima
def write_header(filename, data, lima): from utilities import file_type from EMAN2db import db_open_dict ftp = file_type(filename) if ftp == "bdb": DB = db_open_dict(filename) DB.set_header(lima, data) elif ftp == "hdf": data.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True) else: ERROR("Unacceptable file format","write_headers",1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_headers(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\t# For unknown reasons this does not work on Linux, but works on Mac ??? Really?\n\t\tDB = db_open_dict(filename)\n\t\tfor i in range(len(lima)):\n\t\t\tDB.set_header(lima[i], data[i])\n\t\tDB.close()\n\t\t#for i in range(len(lima)):\n\t\t#\tdata[i].write_image(filename, lima[i])\n\telif ftp == \"hdf\":\n\t\tfor i in range(len(lima)):\n\t\t\tdata[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def writeheader(filename, header):\n # convert string to [unsigned] byte array\n hh = np.zeros(512, dtype='uint8')\n for i, ss in enumerate(header):\n hh[i] = ord(ss)\n # write header to file\n file_arr = np.memmap(filename, dtype='uint8', mode='r+', shape=(512,))\n file_arr[:512] = hh[:]\n del file_arr\n return", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def write(self, filename, data, hdr):\n pass", "def write_data(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n fh.write(str(header))\r\n fh.write(str(data) + \"\\n\")", "def write_header(self, fd):\n fd.write(f\"BEGIN {self.name}\")\n if len(self.data_items) > 0:\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n one_based = (\n self.data_items[0].structure.type == DatumType.integer\n )\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n if len(self.data_items) > 1:\n for data_item in self.data_items[1:]:\n entry = data_item.get_file_entry(values_only=True)\n fd.write(\"%s\" % (entry.rstrip()))\n if self.get_comment().text:\n fd.write(\" \")\n self.get_comment().write(fd)\n fd.write(\"\\n\")", "def _create_header_file(tensor_name, npy_data, output_path, data_linkage):\n file_path = pathlib.Path(f\"{output_path}/\" + tensor_name).resolve()\n # create header file\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\"#include <stddef.h>\\n\")\n header_file.write(\"#include <stdint.h>\\n\")\n header_file.write(\"#include <dlpack/dlpack.h>\\n\")\n header_file.write(f\"const size_t {tensor_name}_len = {npy_data.size};\\n\")\n\n _emit_data_linkage(header_file, data_linkage)\n\n header_file.write(f\"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =\")\n\n header_file.write(\"{\")\n for i in np.ndindex(npy_data.shape):\n header_file.write(f\"{npy_data[i]}, \")\n header_file.write(\"};\\n\\n\")", "def write_data_2(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n if len(header) <= 0 or len(data) <= 0:\r\n return\r\n else:\r\n fh.write(str(header + \"\\n\"))\r\n fh.write(str(data) + \"\\n\")\r\n fh.write(\"\\n\")", "def _save_to_file(filename, data, start=0, header_size=None):\n if header_size is None:\n header_size = 0\n item_dtype = data.dtype\n # Open file as necessary\n opened = 
False\n if isinstance(filename, str):\n fd = open(filename, 'rb+')\n opened = True\n else:\n fd = filename\n # Seek to halo location and write\n offset = header_size + (start * item_dtype.itemsize)\n fd.seek(offset, os.SEEK_SET)\n data.tofile(fd)\n if opened:\n fd.close()", "def edf_write(data, file_name, header_size=1024):\n # get current time\n from time import gmtime, strftime\n today = strftime('%d-%b-%Y', gmtime())\n size = np.shape(data)\n print('data size in pixels is ', size)\n nbytes = np.prod(size) * data.dtype.itemsize\n print('opening', file_name, 'for writing')\n # craft an ascii header of the appropriate size\n f = open(file_name, 'wb')\n head = '{\\n'\n head += 'HeaderID = EH:000001:000000:000000 ;\\n'\n head += 'Image = 1 ;\\n'\n head += 'ByteOrder = LowByteFirst ;\\n'\n head += 'DataType = %13s;\\n' % numpy_to_esrf_datatype(data.dtype)\n print('using data type %s' % numpy_to_esrf_datatype(data.dtype))\n head += 'Dim_1 = %4s;\\n' % size[0]\n if len(size) > 1: head += 'Dim_2 = %4s;\\n' % size[1]\n if len(size) > 2: head += 'Dim_3 = %4s;\\n' % size[2]\n head += 'Size = %9s;\\n' % nbytes\n head += 'Date = ' + today + ' ;\\n'\n for i in range(header_size - len(head) - 2):\n head += ' '\n head += '}\\n'\n f.write(head.encode('utf-8'))\n if len(data.shape) == 3:\n s = np.ravel(data.transpose(2, 1, 0)).tostring()\n elif len(data.shape) == 2:\n s = np.ravel(data.transpose(1, 0)).tostring()\n else:\n s = np.ravel(data).tostring()\n f.write(s)\n f.close()", "def write_file(self,filename):\n \n with open(filename, 'w') as f:\n tab_width = np.max([len(k) for k in self.header.keys()])\n for k,v in self.header.items():\n f.write(u'{0}:\\t{1}\\n'.format(k, v).encode('utf8').expandtabs(tab_width+2))\n np.savetxt(f, self.data, fmt ='%f %f %f %d')", "def write_headerfile(self, header_file, header):\n f = open(header_file, 'w')\n for iii in range(len(header)):\n outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\\n'\n f.write(outline)\n f.close()", "def write_to_file(data, filename):\n fimg = fits.HDUList()\n fimghdu = fits.PrimaryHDU()\n fimghdu.data = data\n fimg.append(fimghdu)\n fimg.writeto(filename, overwrite=True)\n print(' wrote output data to: ', filename)", "def _write_header(self, out_handle):\n out_handle.write(\"##gff-version 3\\n\")", "def write_sff_header(header, fh, num=None):\r\n\r\n lines = [\"Common Header:\"]\r\n if (num is not None):\r\n header[\"# of Flows\"] = num\r\n\r\n lines.extend([\" %s:\\t%s\" % (param, header[param])\r\n for param in header])\r\n fh.write(\"\\n\".join(lines) + \"\\n\\n\")", "def write_header(fpath, header):\n\n with open(fpath, 'r+') as f:\n content = f.read()\n # check if there is a shebang and encoding\n if content[:45] == '#!/usr/bin/env python\\n# -*- coding: utf-8 -*-':\n f.seek(46, 0)\n f.write('\\n' + header + content[46:])\n # check if there is only a shebang\n elif content[:21] == '#!/usr/bin/env python':\n f.seek(22, 0)\n f.write('\\n' + header + content[22:])\n # no shebang or encoding\n else:\n f.seek(0, 0)\n f.write(header + content)", "def make_odb_header(odbfile, dataset):\n \n header = 'headers/' + dataset + '_header.dat'\n \n if not os.path.isfile ( header ):\n print(' Creating the header file for the dataset: ', dataset )\n if dataset in ('era5_1','era5_2'):\n \n odbfile = odbfile.replace('.gz','')\n else:\n odbfile = odbfile.replace('.gz','').replace('.conv._','.conv.')\n \n rdata=subprocess.check_output([\"odb\",\"header\", odbfile ])\n \n with open( header , 'wb' ) as f:\n f.write(rdata) \n \n f = open(header , 
'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n') \n \n else:\n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n')\n #print(' Done reading the existing header file for the dataset: ', dataset )\n \n columns, kinds, tdict =[] , [] , {} \n \n for r in rdata[2:-2]:\n try:\n \n if r[:6]=='Header':\n break\n else: \n columns.append(r.split('name: ')[1].split(',')[0])\n kinds.append(r.split('type: ')[1].split(',')[0])\n if kinds[-1]=='REAL':\n tdict[columns[-1]]=numpy.float32\n elif 'INTEGER' in kinds[-1] or 'BITFIELD' in kinds[-1]:\n #print(columns[-1])\n if columns[-1]=='sonde_type@conv' or columns[-1]=='station_type@conv':\n tdict[columns[-1]]=numpy.float32\n else: \n tdict[columns[-1]]=numpy.int32\n else:\n tdict[columns[-1]]=numpy.dtype('S') # dict containng column name and type\n \n except IndexError:\n pass \n \n \"\"\" This is done otherwise for the era5 databases (1759,1761,3188) the tdict has different length than the columns list.\n So the following call alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) breaks \"\"\" \n for t in tdict.keys():\n if t not in columns:\n #print(\"Removing non appearing fb column: \" , c) \n del tdict[t]\n \n \"\"\" These values must be removed rom the fb, since they have NULL values and it creates problem with \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) \"\"\" \n \n if dataset in [\"era5_1759\", \"era5_1761\", \"era5_3188\"]:\n remove = ['sonde_type@conv' , \"eda_spread@errstat\", \"bias_volatility@body\" , \"timeseries_index@conv\"]\n for c in remove:\n #print(\"Removing wrong fb column: \" , c)\n try:\n columns.remove(c)\n del tdict[c]\n except:\n pass\n return columns, kinds, tdict", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def print_header(filename):\n\n date_list = filename[0:10].split('_')\n # Hint: CWB Metadata cannot contain dashes -\n name = 'id=\"{}\"'.format(filename[0:-4].replace('-', '_'))\n date = 'date=\"{}\"'.format('_'.join(date_list))\n year = 'year=\"{}\"'.format(date_list[0])\n month = 'month=\"{}\"'.format(date_list[1])\n day = 'day=\"{}\"'.format(date_list[2])\n\n header = '<text {} {} {} {} {}>'.format(name, date, year, month, day)\n\n print(header)", "def write_header(outfbfile, header_params, header):\n for hp in header_params:\n hdrval = sigproc.addto_hdr(hp, header[hp])\n outfbfile.write(hdrval)", "def print_header(fitsfile, ext=0, ofileh=sys.stdout):\n\n hdr = fitsio.read_header(fitsfile, ext=ext)\n ofileh.write(f\"{hdr}\")\n ofileh.write(\"\\n\")", "def save_header_default(filename, nhalos_per_tree):\n ntrees = len(nhalos_per_tree)\n nhalos = np.sum(nhalos_per_tree)\n dtype1 = np.dtype([('ntrees', 'i4'), ('totnhalos', 'i4')])\n x1 = np.array([(ntrees, nhalos)], dtype=dtype1)\n x2 = nhalos_per_tree.astype('i4')\n header_size = x1.nbytes + x2.nbytes\n # Open\n if isinstance(filename, str):\n fd = open(filename, 'wb')\n close = True\n else:\n fd = filename\n close = False\n # Write\n x1.tofile(fd)\n x2.tofile(fd)\n # Close\n if close:\n fd.close()\n return header_size", "def write_uef_header(file, major, minor):\n\n\t# Write the UEF file header\n\tfile.write('UEF File!\\000')\n\n\t# Minor and major version numbers\n\tfile.write(number(1, minor) + number(1, major))", "def writeheader(fh,colnames):\n for i in range(len(colnames)):\n fh.write('# %d %s\\n'%(i+1,colnames[i]))", "def 
add_header(header, filename, i):\n with open(filename, 'r+') as f:\n content = f.readlines()\n content[0] = header\n f.seek(0,0)\n f.write(f'<!-- Generated with XMLGenerator.py {__ver__} | {get_app_name(i)} -->\\n')\n f.writelines(content)", "def write_header(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"w+\")\r\n self.file.write(self.version)\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line)\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line)\r\n print(self.body_header_line.line)", "def WriteHeaderFileForCcmModel(filename, model): \n\n ccm_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(ccm_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(ccm_model_name + \".hpp written!\\n\")", "def writeHeader(self,header):\n pass", "def tabser(filename, body, data):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n buffer = bytearray([0] * (2 ** 16))\n head.pack_into(buffer, 0, 0, int(time()), len(data), body.size, 0),\n offset = head.size\n for row in data:\n body.pack_into(buffer, offset, *row, 0)\n offset += body.size\n else:\n print(\"write %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # foot.pack_into(buffer, offset, bytes([0, 0, 0, 0]))\n with open(filename, \"wb\") as f:\n f.write(buffer)", "def put_header(file,text,comment=1):\n if len(text)==0: return\n if text[0]!='#' and comment: text='#'+text\n if text[-1]!='\\n':text=text+'\\n'\n buffer=text+open(file).read()\n open(file,'w').write(buffer)", "def WriteHeaderFileForSrnModel(filename, model): \n\n srn_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(srn_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(srn_model_name + \".hpp 
written!\\n\")", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.txt\", \"w\")\n file.write(str(\"\\t\".join(hdata)) + \"\\n\")", "def writeHeader( self ):\n for k in self.secondaryTargets.keys():\n fileName = self.treyGene[k] + \"-GenesinCommon.txt\" \n with open( fileName, 'w' ) as out:\n out.write(\"%s\\t%s\\t%s\\n\" %(\"Gene_trey\", \"Gene\", \"Gene_inCommon\" ))\n out.close()", "def save_gyre(filename, header, data):\n with open(filename, 'wt') as f:\n header_length = len(list(header[()]))\n # if header_length == 4:\n # fmt = ''.join(['%6i','%26.16E'*3,'\\n'])\n # elif header_length == 5:\n # fmt = ''.join(['%6i','%26.16E'*3,'%6i\\n'])\n # else:\n # raise ValueError(\"header should have 4 or 5 components but \"\n # \"it appears to have %i\" % header_length)\n if not 'version' in header.dtype.names:\n fmt = ''.join(['%6i','%26.16E'*3,'\\n'])\n else:\n fmt = ''.join(['%6i','%26.16E'*3,'%6i\\n'])\n\n f.writelines([fmt % tuple(header[()])])\n\n N = len(data[0])-1\n fmt = ''.join(['%6i',' %26.16E'*N,'\\n'])\n for row in data:\n f.writelines([fmt % tuple(row)])", "def export_mlab_zone_header(output, header, options):\n headerdata = header.read()\n headerdata = headerdata % options.__dict__\n output.write(headerdata)", "def write_header(output_file, line, file_headers):\n output_file.write('%s\\t' % line)\n for index, header in enumerate(file_headers):\n output_file.write(header.strip())\n if index < (len(file_headers) - 1):\n output_file.write('\\t')\n output_file.write('\\n')", "def WriteIndexHeader(indexFileHeaderText, formatindex, fpindex):#{{{\n if formatindex == FORMAT_TEXT:\n for s in indexFileHeaderText:\n print(s, file=fpindex)\n else:\n dumpedtext='\\n'.join(s for s in indexFileHeaderText)\n vI = array('I')\n vI.append(len(dumpedtext))\n vI.tofile(fpindex)\n fpindex.write(dumpedtext)", "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()", "def write_eneheader(self,filename,replica):\n \n fheader = open(filename,'w')\n fheader.write('E_pot\\tE_rest(D)\\tD\\tcontact_state\\ttemp\\n')\n fheader.write('# Energy units: Joules/mol\\n')\n fheader.write('# Restrained contact state: ' + repr(replica.mc.restraint.contacts) + '\\n')\n fheader.write('# kspring: '+str(replica.mc.restraint.kspring) + '\\n')\n\tfheader.close()", "def WriteHeader(self):\n return", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def write(filename, data, extname=None, extver=None, header=None,\n clobber=False, ignore_empty=False, units=None, table_type='binary',\n names=None, write_bitcols=False, compress=None, tile_dims=None,\n **keys):\n if keys:\n import warnings\n warnings.warn(\n \"The keyword arguments '%s' are being 
ignored! This warning \"\n \"will be an error in a future version of `fitsio`!\" % keys,\n DeprecationWarning, stacklevel=2)\n\n kwargs = {\n 'clobber': clobber,\n 'ignore_empty': ignore_empty\n }\n with FITS(filename, 'rw', **kwargs) as fits:\n fits.write(data,\n table_type=table_type,\n units=units,\n extname=extname,\n extver=extver,\n compress=compress,\n header=header,\n names=names,\n write_bitcols=write_bitcols,\n tile_dims=tile_dims)", "def write_header(indir, nb_landmark, nb_feature, mirror_factor, order_factor, feature_names=None):\n assert nb_landmark > 0\n assert os.path.exists(indir) and os.path.isdir(indir), indir + \" not found.\"\n if indir[-1] != os.sep:\n indir += os.sep\n axis = [\"x\", \"y\", \"z\"]\n header = \"ID\"\n for numb in range(1, nb_landmark + 1):\n for axe in axis:\n header += \",\" + axe + str(numb)\n if feature_names is not None:\n assert len(feature_names) == nb_feature\n header += \",\" + \",\".join(feature_names)\n else:\n for numb in range(1, nb_feature + 1):\n header += \",Feature\" + str(numb)\n header += \"\\n\"\n with open(indir + \"../landmarks.csv\", \"w\") as filep:\n filep.write(header)\n modif = \"\"\n if mirror_factor is not None:\n modif += \"_reversed\"\n if order_factor is not None:\n modif += \"_reordered\"\n if mirror_factor is not None or order_factor is not None:\n with open(indir + \"../landmarks\" + modif + \".csv\", \"w\") as filep:\n filep.write(header)", "def write_header(self, name, type, level=None, is_aux_number=None):\n names = []\n types = []\n levels = []\n is_aux_numbers = []\n for index in xrange(len(name)):\n names.append(c_str(name[index]))\n types.append(_NP_2_DTYPE[type[index]])\n levels.append(0 if level is None else level[index])\n is_aux_numbers.append(0 if is_aux_number is None else is_aux_number[index])\n check_call(LIB.HPPS_RecordIOWriteHeader(self.handle,\n len(names),\n c_array(ctypes.c_char_p, names),\n c_array(ctypes.c_int, types),\n c_array(ctypes.c_int, levels),\n c_array(ctypes.c_int, is_aux_numbers)))", "def write_header(self, *, version=3.01, file_type='O: Observation', satellite_type='M: Mixed GNSS',\n run_by='GPSLiDAR', organization='CCAR', observer='Adam Dodge', agency='CCAR', receiver_num='1',\n receiver_type='GENERIC_P1', receiver_vers='1.0.0', antenna_number=1, antenna_type='RTK2-F9P',\n delta_pos=[0,0,0]):\n markerstr = 'GPS LiDAR System at ' + self.longname\n if not os.path.isfile(self.fname):\n tstr = self.t.strftime('%Y%m%d %H%M%S')\n # TODO: Fix header (not working in readers)\n r = 6371000 + self.alt\n x = r * np.cos(self.lat * np.pi/180) * np.cos(self.lon * np.pi/180)\n y = r * np.cos(self.lat * np.pi/180) * np.sin(self.lon * np.pi/180)\n z = r * np.sin(self.lat * np.pi/180)\n header = f'{version:>9.2f}{\" \":<11s}{file_type:<20s}{satellite_type:<20s}{\"RINEX VERSION / TYPE\":<20s}\\n' + \\\n f'{run_by:<20s}{organization:<20s}{tstr:<16s}UTC {\"PGM / RUN BY / DATE\":<20s}\\n' + \\\n f'{markerstr:<60}{\"MARKER NAME\":<20s}\\n' + \\\n f'{self.station:<60}{\"MARKER NUMBER\":<20s}\\n' + \\\n f'{\"GEODETIC\":<20s}{\" \":40s}{\"MARKER TYPE\":<20s}\\n' + \\\n f'{observer:<20}{agency:<40}{\"OBSERVER / AGENCY\":<20s}\\n' + \\\n f'{receiver_num:<20}{receiver_type:<20}{receiver_vers:<20}{\"REC # / TYPE / VERS\":<20s}\\n' + \\\n f'{antenna_number:<20}{antenna_type:<40s}{\"ANT # / TYPE\":<20s}\\n' + \\\n f'{x:14.4f}{y:>14.4f}{z:>14.4f}{\" \":18s}{\"APPROX POSITION XYZ\":<20s}\\n' + \\\n f'{delta_pos[0]:14.4f}{delta_pos[1]:>14.4f}{delta_pos[2]:>14.4f}{\" \":18s}{\"ANTENNA: DELTA H/E/N\":<20s}\\n' + \\\n f'G 
{8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'R {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'E {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'S {8:<3d} C1 L1 D1 S1 C5 L5 D5 S5 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'{\"DBHZ\":<60s}{\"SIGNAL STRENGTH UNIT\":<20s}\\n' + \\\n f'{self.t.year:>6d}{self.t.month:>6d}{self.t.day:>6d}{self.t.hour:>6d}{self.t.minute:>6d}' + \\\n f'{self.t.second:>13.7f} UTC{\" \":<9s}{\"TIME OF FIRST OBS\":<20s}\\n' + \\\n f' 0{\" \":54s}{\"RCV CLOCK OFFS APPL\":<20s}\\n' + \\\n f'G{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'R{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'E{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'S{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'{self.leapS:>6d}{\" \":>54s}{\"LEAP SECONDS\":<20s}\\n' + \\\n f'{\" \":>60s}{\"END OF HEADER\":<20s}\\n'\n\n try:\n with open(self.fname, 'w') as f:\n f.write(header)\n except FileNotFoundError:\n print('Data directory is bad. Try again.')\n sys.exit(0)", "def add_header(in_file, file_type):\n\n if file_type == 'bowtie':\n header = \"Read name\\t\" + \"Reference strand\\t\" + \"Name of reference sequence\\t\" \\\n + \"Position alignment occurs\\t\" + \"Read sequence\\t\" + \"Read qualities\\t\" \\\n + \"Ceiling\\t\" + \"Mismatch descriptors\\n\"\n else:\n header = ''\n\n # Temp file for final results including header\n temp_out = tempfile.mkstemp()\n f_in = open(in_file, 'r')\n results = f_in.read()\n f_out = open(temp_out[1] + '.txt', 'w')\n f_out.write(header)\n f_out.write(results)\n\n f_in.close()\n f_out.close()\n return temp_out[1] + '.txt'", "def write_header_in_gz_file(self):\r\n if self.arguments['--out']:\r\n self.file = gzip.open(self.arguments['--out'] + '.gz', \"w+b\")\r\n self.file.write(self.version.encode('utf-8'))\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line.encode('utf-8'))\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line.encode('utf-8'))", "def write(self, filename):\n bvh_string = self.generate_bvh_string()\n if filename[-4:] == '.bvh':\n filename = filename\n else:\n filename = filename + '.bvh'\n with open(filename, 'w') as outfile:\n outfile.write(bvh_string)", "def CosmicFish_write_header(name):\n\n print\n print \"**************************************************************\"\n print \" _____ _ _____ __ \"\n print \" / ___/__ ___ __ _ (_)___/ __(_)__ / / \"\n print \" / /__/ _ \\(_-</ ' \\/ / __/ _// (_-</ _ \\ \"\n print \" \\___/\\___/___/_/_/_/_/\\__/_/ /_/___/_//_/ Py Lib\"\n print \" \"\n print \"**************************************************************\"\n print name\n print \" This application was developed using the CosmicFish code.\"\n print \"**************************************************************\"\n print", "def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n joined_sam = open(os.path.join(args.output_dir, 'watson_joinedAligned.out.sam'))\n merged_sam = open(os.path.join(args.output_dir, 'watson_mergedAligned.out.sam'))\n for line in joined_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n else:\n break\n for line in merged_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not 
line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args", "def write_opening_header(final_file, **header_params):\n\n final_file.seek(0) # Reset file pointer.\n file_contents = final_file.read() # Save content.\n\n final_file.seek(0) # Write at the top.\n\n if header_params[\"extensions\"]:\n if len(header_params[\"extensions\"]) > 1:\n write_data(\n final_file,\n \"# Title: StevenBlack/hosts with the {0} and {1} extensions\\n#\\n\".format(\n \", \".join(header_params[\"extensions\"][:-1]),\n header_params[\"extensions\"][-1],\n ),\n )\n else:\n write_data(\n final_file,\n \"# Title: StevenBlack/hosts with the {0} extension\\n#\\n\".format(\n \", \".join(header_params[\"extensions\"])\n ),\n )\n else:\n write_data(final_file, \"# Title: StevenBlack/hosts\\n#\\n\")\n\n write_data(\n final_file,\n \"# This hosts file is a merged collection \"\n \"of hosts from reputable sources,\\n\",\n )\n write_data(final_file, \"# with a dash of crowd sourcing via GitHub\\n#\\n\")\n write_data(\n final_file,\n \"# Date: \" + time.strftime(\"%d %B %Y %H:%M:%S (%Z)\", time.gmtime()) + \"\\n\",\n )\n\n if header_params[\"extensions\"]:\n write_data(\n final_file,\n \"# Extensions added to this file: \"\n + \", \".join(header_params[\"extensions\"])\n + \"\\n\",\n )\n\n write_data(\n final_file,\n (\n \"# Number of unique domains: {:,}\\n#\\n\".format(\n header_params[\"numberofrules\"]\n )\n ),\n )\n write_data(\n final_file,\n \"# Fetch the latest version of this file: \"\n \"https://raw.githubusercontent.com/StevenBlack/hosts/master/\"\n + path_join_robust(header_params[\"outputsubfolder\"], \"\").replace(\"\\\\\", \"/\")\n + \"hosts\\n\",\n )\n write_data(\n final_file, \"# Project home page: https://github.com/StevenBlack/hosts\\n\"\n )\n write_data(\n final_file,\n \"# Project releases: https://github.com/StevenBlack/hosts/releases\\n#\\n\",\n )\n write_data(\n final_file,\n \"# ===============================================================\\n\",\n )\n write_data(final_file, \"\\n\")\n\n if not header_params[\"skipstatichosts\"]:\n write_data(final_file, \"127.0.0.1 localhost\\n\")\n write_data(final_file, \"127.0.0.1 localhost.localdomain\\n\")\n write_data(final_file, \"127.0.0.1 local\\n\")\n write_data(final_file, \"255.255.255.255 broadcasthost\\n\")\n write_data(final_file, \"::1 localhost\\n\")\n write_data(final_file, \"::1 ip6-localhost\\n\")\n write_data(final_file, \"::1 ip6-loopback\\n\")\n write_data(final_file, \"fe80::1%lo0 localhost\\n\")\n write_data(final_file, \"ff00::0 ip6-localnet\\n\")\n write_data(final_file, \"ff00::0 ip6-mcastprefix\\n\")\n write_data(final_file, \"ff02::1 ip6-allnodes\\n\")\n write_data(final_file, \"ff02::2 ip6-allrouters\\n\")\n write_data(final_file, \"ff02::3 ip6-allhosts\\n\")\n write_data(final_file, \"0.0.0.0 0.0.0.0\\n\")\n\n if platform.system() == \"Linux\":\n write_data(final_file, \"127.0.1.1 \" + socket.gethostname() + \"\\n\")\n write_data(final_file, \"127.0.0.53 \" + socket.gethostname() + \"\\n\")\n\n write_data(final_file, \"\\n\")\n\n preamble = path_join_robust(BASEDIR_PATH, \"myhosts\")\n maybe_copy_example_file(preamble)\n\n if os.path.isfile(preamble):\n with open(preamble, \"r\") as f:\n write_data(final_file, f.read())\n\n final_file.write(file_contents)", "def _reportDataFile(self, dataFileName, outputFile):\n #subsequent access to the file should be open for \"append\"-ing\n 
f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\" ' +AutoGrader.Const.HEADER_COLOR2 + '\"><br>\\n------------- ' + os.path.split(dataFileName)[1] + ' -------------</font>\\n')\n f.close()", "def csv_make_header(self, fileobj, title, comment=\"\"):\n fileobj.write(csv_line( [\"#Title:\", title] ) )\n fileobj.write(csv_line( [\"#Comment:\", comment] ) )\n #Any other useful comment s trings?\n fileobj.write('#\"First column is the sample phi motor rotation, in radians\"\\n' )\n fileobj.write('#\"Next 6 columns are the XY leg positions in mm, relative to the central (neutral) position.\"\\n' )\n fileobj.write('#\"Next are 2 columns for the stopping criterion parameters.\"\\n' )\n #Line of header info\n fileobj.write(csv_line( ['Phi', 'LegA_X', 'LegA_Y', 'LegB_X', 'LegB_Y', 'LegC_X', 'LegC_Y', 'CountFor', 'CountValue', 'Comment'] ) )", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, 
dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def write_header(self, stream, alignments):\n return\n ##################################################\n # You MUST implement this method in the subclass #\n # if the file format defines a file header. 
#\n ##################################################", "def append2hdr(self, keyword=None, value=None, ext=False):\n if keyword is not None:\n if ext:\n self.hdr_ext[keyword] = value\n self.write_headerfile(self.hdrfile_ext, self.hdr_ext)\n else:\n self.hdr[keyword] = value\n self.write_headerfile(self.hdrfile, self.hdr)", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.html\", \"w\")\n file.write(\"<html>\\n\\t<head>\\n\\t<style>\\n\" +\n \"\\t\\t\\ttable, th, td {border: 1px solid\\n\" +\n \"\\t\\t\\tblack;border-collapse: collapse;}\" +\n \"\\n\\t</style>\\n\" +\n \"\\t</head>\\n\\t<body>\\n\\t\\t<table style=\\\"width:100%\\\">\\n\")\n file.write(\"\\t\\t\\t<tr>\\n\")\n for line in hdata:\n file.write(\n \"\\t\\t\\t\\t\\t<th>\\n\\t\\t\\t\\t\\t\\t\"\n + str(line) + \"\\n\\t\\t\\t\\t\\t</th>\\n\")\n file.write(\"\\t\\t\\t</tr>\\n\")", "def _writeCommonHeader(self):\n # Line 1 if often overwritten at _fixHeaderLength\n self.header.write(wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (self.NLHEAD, self.delimiter, self.FFI)))\n self.header.write(getAnnotation(\"ONAME\", self.annotation, delimiter = self.delimiter) + stripQuotes(self.ONAME) + \"\\n\")\n self.header.write(getAnnotation(\"ORG\", self.annotation, delimiter = self.delimiter) + stripQuotes(self.ORG) + \"\\n\")\n self.header.write(getAnnotation(\"SNAME\", self.annotation, delimiter = self.delimiter) + stripQuotes(self.SNAME) + \"\\n\")\n self.header.write(getAnnotation(\"MNAME\", self.annotation, delimiter = self.delimiter) + stripQuotes(self.MNAME) + \"\\n\")\n self.header.write(wrapLine(\"IVOL_NVOL\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (self.IVOL, self.delimiter, self.NVOL)))\n line = \"%d %d %d%s%d %d %d\\n\" % (self.DATE[0], self.DATE[1], self.DATE[2], self.delimiter, self.RDATE[0], self.RDATE[1], self.RDATE[2])\n self.header.write(wrapLine(\"DATE_RDATE\", self.annotation, self.delimiter, line))", "def process_header_data(spark, input_dir, output):\n\theader = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__header_*__*.csv\") \\\n\t\t.select(*header_cols) \\\n\t\t.where(col('identifier').isNotNull())\n\n\tbill = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__billgen_*__*.csv\") \\\n\t\t.select(*bill_cols)\n\n\theader_full = header.join(bill, ['identifier'], how='left')\n\n\theader_full.repartition(1).write.mode('overwrite').format(\"csv\") \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.save(f\"{output}/header/\")", "def writeHedr(self):\n path = os.path.join(self.dir,self.name)\n out = file(path,'r+b')\n out.seek(16) #--Skip to Hedr record data\n self.tes3.hedr.getSize()\n self.tes3.hedr.dump(out)\n out.close()\n #--Done\n self.getHeader()\n self.setMTime()", "def writeHeader( self, file, bAddBeginOfDataChunk = False, nDataSize = -1 ):\n file.write( \"RIFF\" )\n if( nDataSize == -1 ):\n nDataSize = self.nDataSize # default use data size from object\n file.write( struct.pack( \"I\", 4 + nDataSize + 44 + 4 - 16 ) )\n \n file.write( \"WAVE\" )\n file.write( \"fmt \" )\n file.write( struct.pack( \"I\", 16 ) )\n file.write( struct.pack( \"h\", 1) ) # self.nWaveTypeFormat\n file.write( struct.pack( \"h\", self.nNbrChannel) )\n file.write( struct.pack( \"i\", self.nSamplingRate) )\n file.write( struct.pack( \"i\", 
self.nAvgBytesPerSec) )\n file.write( struct.pack( \"h\", self.nSizeBlockAlign) )\n file.write( struct.pack( \"h\", self.nNbrBitsPerSample) ) \n \n if( bAddBeginOfDataChunk ):\n file.write( \"data\" )\n file.write( struct.pack( \"I\", nDataSize ) )", "def write(self, filename=None):\n if filename is None:\n filename = self.filename\n else:\n self.filename = filename\n self.header[\"sha1sum\"] = self._get_checksum(self.format())\n with open(filename, \"w+\") as handle:\n handle.write(self.format())", "def writeTableHeader(self, fileName, variant=0):\r\n # research\r\n w = slicer.modules.NeedleFinderWidget\r\n l = w.logic\r\n if not variant:\r\n l.exportEvaluation(['user','case','maxTipHD','maxHD', 'avgHD', 'stdHD', 'medHD',\r\n 'nNeedles','nOutliers','outliers',\r\n 'radiusNeedle',\r\n 'lenghtNeedle',\r\n 'radiusMax',\r\n 'numberOfPointsPerNeedle',\r\n 'nbRotatingIterations',\r\n 'stepSize',\r\n 'gradientPonderation',\r\n 'exponent',\r\n 'gaussianAttenuationButton',\r\n 'sigma',\r\n 'algoV',\r\n 'case',\r\n t.strftime(\"%d/%m/%Y\"), t.strftime(\"%H:%M:%S\")\r\n ], fileName)\r\n else:\r\n l.exportEvaluation(['user','case','tipHD','HD', 'man.-seg_', 'ID1', 'ID2',\r\n 'outlier?',\r\n 'radiusNeedle',\r\n 'lenghtNeedle',\r\n 'radiusMax',\r\n 'numberOfPointsPerNeedle',\r\n 'nbRotatingIterations',\r\n 'stepSize',\r\n 'gradientPonderation',\r\n 'exponent',\r\n 'gaussianAttenuationButton',\r\n 'sigma',\r\n 'algoV',\r\n #'case',\r\n t.strftime(\"%d/%m/%Y\"), t.strftime(\"%H:%M:%S\")\r\n ], fileName)", "def _write_header(self, header):\n # write out telescope and source information\n header[\"latitude\"] = self.telescope_location_lat_lon_alt_degrees[0]\n header[\"longitude\"] = self.telescope_location_lat_lon_alt_degrees[1]\n header[\"altitude\"] = self.telescope_location_lat_lon_alt_degrees[2]\n header[\"telescope_name\"] = np.string_(self.telescope_name)\n header[\"instrument\"] = np.string_(self.instrument)\n header[\"object_name\"] = np.string_(self.object_name)\n\n # write out required UVParameters\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"Nfreqs\"] = self.Nfreqs\n header[\"Npols\"] = self.Npols\n header[\"Nspws\"] = self.Nspws\n header[\"Ntimes\"] = self.Ntimes\n header[\"antenna_numbers\"] = self.antenna_numbers\n header[\"uvw_array\"] = self.uvw_array\n header[\"vis_units\"] = np.string_(self.vis_units)\n header[\"channel_width\"] = self.channel_width\n header[\"time_array\"] = self.time_array\n header[\"freq_array\"] = self.freq_array\n header[\"integration_time\"] = self.integration_time\n header[\"lst_array\"] = self.lst_array\n header[\"polarization_array\"] = self.polarization_array\n header[\"spw_array\"] = self.spw_array\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"antenna_positions\"] = self.antenna_positions\n\n # handle antenna_names; works for lists or arrays\n header[\"antenna_names\"] = np.asarray(self.antenna_names, dtype=\"bytes\")\n\n # write out phasing information\n header[\"phase_type\"] = np.string_(self.phase_type)\n if self.phase_center_ra is not None:\n header[\"phase_center_ra\"] = self.phase_center_ra\n if self.phase_center_dec is not None:\n header[\"phase_center_dec\"] = self.phase_center_dec\n if self.phase_center_epoch is not None:\n header[\"phase_center_epoch\"] = self.phase_center_epoch\n if self.phase_center_frame is not None:\n header[\"phase_center_frame\"] = 
np.string_(self.phase_center_frame)\n\n # write out optional parameters\n if self.dut1 is not None:\n header[\"dut1\"] = self.dut1\n if self.earth_omega is not None:\n header[\"earth_omega\"] = self.earth_omega\n if self.gst0 is not None:\n header[\"gst0\"] = self.gst0\n if self.rdate is not None:\n header[\"rdate\"] = np.string_(self.rdate)\n if self.timesys is not None:\n header[\"timesys\"] = np.string_(self.timesys)\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n if self.blt_order is not None:\n header[\"blt_order\"] = np.string_(\", \".join(self.blt_order))\n if self.antenna_diameters is not None:\n header[\"antenna_diameters\"] = self.antenna_diameters\n if self.uvplane_reference_time is not None:\n header[\"uvplane_reference_time\"] = self.uvplane_reference_time\n if self.eq_coeffs is not None:\n header[\"eq_coeffs\"] = self.eq_coeffs\n if self.eq_coeffs_convention is not None:\n header[\"eq_coeffs_convention\"] = np.string_(self.eq_coeffs_convention)\n\n # write out extra keywords if it exists and has elements\n if self.extra_keywords:\n extra_keywords = header.create_group(\"extra_keywords\")\n for k in self.extra_keywords.keys():\n if isinstance(self.extra_keywords[k], str):\n extra_keywords[k] = np.string_(self.extra_keywords[k])\n else:\n extra_keywords[k] = self.extra_keywords[k]\n\n # write out history\n header[\"history\"] = np.string_(self.history)\n\n return", "def write(self, filename, header=None):\n\n origfile = self._filename\n\n try:\n with open(filename, 'w') as _file:\n self.writestream(_file, header)\n self._filename = filename\n return True\n\n except IOError:\n self._filename = origfile\n return False", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: 
{nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is not in the same directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def _write_gen_header(self, Index=False, FLAGS=None):\n if FLAGS is None:\n FLAGS = []\n FTEXT, FHCRC, FEXTRA, FNAME = 1, 2, 4, 8 # extra field bit flags\n current_time = int(time.time())\n time_byte = struct.pack(\"<L\", current_time)\n self.generic_header[\"DATE\"] = time_byte\n if Index:\n self.generic_header[\"FLAGS\"] = b\"\\x10\"\n if FLAGS is not None:\n if \"FTEXT\" in FLAGS:\n self.generic_header[\"FLAGS\"] = self.generic_header[\"FLAGS\"] & FTEXT\n\n if \"FHCRC\" in FLAGS:\n header_crc32 = 0\n self.generic_header[\"FLAGS\"] = self.generic_header[\"FLAGS\"] & FHCRC\n for byte in self.generic_header.values():\n header_crc32 = zlib.crc32(byte, header_crc32)\n\n if \"FEXTRA\" in FLAGS:\n self.generic_header[\"FLAGS\"] = self.generic_header[\"FLAGS\"] & FEXTRA\n\n if \"FNAME\" in FLAGS:\n self.generic_header[\"FLAGS\"] = self.generic_header[\"FLAGS\"] & FNAME\n\n for value in self.generic_header.values():\n self.file_out.write(value)\n if \"FEXTRA\" in FLAGS:\n # WRITE EXTRA FIELD\n pass\n\n if \"FNAME\" in FLAGS:\n # WRITE FNAME FIELD\n fName = self.file_name.split(\"/\")[-1]\n\n if Index:\n self.generic_header[\"FLAGS\"] = b\"\\x00\"\n self.file_out.write(self.index_magic_bytes)\n self.file_out.write(struct.pack(\"<B\", self.max_idx_len))\n self.file_out.write(struct.pack(\"<B\", self.max_offset_len))\n self.index_offset = self.file_out.tell()\n self._allocate_index_bytes()\n\n if \"FHCRC\" in FLAGS:\n # WRITE checksum for header\n pass\n\n return self.file_out.tell()", "def write_file(data, filename):\n file = open(filename, \"wb\")\n file.write(data)\n file.close()", "def fasta_header(path, new_path):\n with open(path, 'r') as f_in:\n with open(new_path, 'w+') as f_out:\n records = SeqIO.parse(f_in, 'fasta')\n for record in records:\n record.id = record.id.split(\" \")[0]\n record.description = record.id.split(\" \")[0]\n SeqIO.write(record, f_out, 'fasta')\n return new_path", "def write_fenth_out4fp(fname,dH,vol):\n with open(fname,'w') as f:\n cmd = ' '.join(s for s in sys.argv)\n f.write('# Output at {0:s} from,\\n'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n f.write('# {0:s}\\n'.format(cmd))\n #...Num of data, weight for the data\n f.write(' {0:6d} {1:7.3f}\\n'.format(2, 1.0))\n f.write(' {0:8.3f} {1:8.3f}\\n'.format(dH,vol))\n return None", "def __gen_header( files: List[str] ) -> bytes:\n\n number_of_files = len( files )\n\n LOGGER( \"info\", \"Generating header...\" )\n\n # calculate sizes\n stringTable = '\\x00'.join([os.path.basename(file) for file in files])\n headerSize = 0x10 + (number_of_files)*0x18 + len(stringTable)\n remainder = 0x10 - headerSize%0x10\n\n # add padding to a multible of 0x10\n headerSize += remainder\n \n # get file information\n fileSizes = [os.path.getsize(file) for file in files]\n fileOffsets = [sum(fileSizes[:n]) for n in range(number_of_files)]\n \n # string table calculations\n fileNamesLengths = 
[len(os.path.basename(file))+1 for file in files] # +1 for the \\x00 separator\n stringTableOffsets = [sum(fileNamesLengths[:n]) for n in range(number_of_files)]\n \n\n # assemble header\n\n header = b'PFS0'\n header += pk('<I', number_of_files)\n header += pk('<I', len(stringTable)+remainder)\n header += b'\\x00\\x00\\x00\\x00'\n \n # add file info\n for n in range(number_of_files):\n header += pk('<Q', fileOffsets[n])\n header += pk('<Q', fileSizes[n])\n header += pk('<I', stringTableOffsets[n])\n header += b'\\x00\\x00\\x00\\x00'\n header += stringTable.encode()\n header += remainder * b'\\x00'\n \n LOGGER( \"info\", \"header successfully created.\" )\n\n return header", "def write_header(self, total_blocks):\n self.write_string('ASEF')\n self.write('2H', (1, 0))\n self.write('i', total_blocks)", "def writeHeaderOutput(self, com, output, prettyname):\n # couple of strings we use\n titleString = \" (c) Cambridge Silicon Radio Limited \"\n titleString += datetime.datetime.now().strftime(\"%Y\")\n rightsString = \" All rights reserved and confidential information of CSR\"\n revString = \" REVISION: $Revision$\"\n genString = \"This file was autogenerated from:\"\n genString2 = \"Using %s version %s, on %s\" % (getFile(), getVersion(), self.timeStamp)\n\n # how long at the comment characters\n lineLen = DEFAULT_WIDTH - len(com[0]) - len(com[1])\n\n # some lines we use\n starLine = \"*\" * lineLen\n gapLine = \" \" * lineLen\n\n # bumf required at the head of files that might be released\n out = [starLine, gapLine,\n (\"%%-%ds\" % lineLen) % titleString,\n gapLine,\n (\"%%-%ds\" % lineLen) % rightsString,\n gapLine,\n (\"%%-%ds\" % lineLen) % revString,\n starLine,\n # bumf that says file was autogenerated\n starLine,\n (\" %%-%ds\" % (lineLen-1)) % genString,\n (\" %%-%ds\" % (lineLen-3)) % self.source,\n (\" %%-%ds\" % (lineLen-1)) % genString2,\n starLine]\n\n output.write( \"\\n\\n\"+\"\".join( \"%s%s%s\\n\" % (com[0], o, com[1]) for o in out) + \"\\n\\n\" )", "def update_header(fopen):\n json_start = fopen.tell()\n fopen.seek(52, 0)\n fopen.write(struct.pack('<Q', json_start))\n fopen.seek(json_start)", "def write_name_file(self):\n fn_path = os.path.join(self.model_ws, self.mpnamefile)\n f_nam = open(fn_path, 'w')\n f_nam.write('%s\\n' % (self.heading))\n if self.mpbas_file is not None:\n f_nam.write('%s %3i %s\\n' % ('MPBAS', 86, self.mpbas_file))\n if self.dis_file is not None:\n f_nam.write('%s %3i %s\\n' % ('DIS', self.dis_unit, self.dis_file))\n if self.head_file is not None:\n f_nam.write('%s %3i %s\\n' % ('HEAD', 88, self.head_file))\n if self.budget_file is not None:\n f_nam.write('%s %3i %s\\n' % ('BUDGET', 89, self.budget_file))\n for u, f in zip(self.external_units, self.external_fnames):\n f_nam.write('DATA {0:3d} '.format(u) + f + '\\n')\n f_nam.close()", "def print_header(name, texfile):\n texfile.write('\\n')\n texfile.write('%--------------------\\n')\n texfile.write('%---' + name.upper() + ('-' * (17 - len(name))) + '\\n')\n texfile.write('%--------------------\\n')", "def writeHtk(filename, feature, sampPeriod, parmKind):\n with open(filename, \"wb\") as f:\n # Write header\n nSamples = feature.shape[0]\n sampSize = feature.shape[1] * 4\n f.write(struct.pack(\">iihh\", nSamples, sampPeriod, sampSize, parmKind))\n f.write(struct.pack(\">%df\" % (nSamples * sampSize / 4), *feature.ravel()))", "def modify_header():\n\n print_debug_info()\n if not bool(int(vim.eval(\"g:BHModify\"))):\n return\n\n if not should_do_write():\n debug(\"should not write this buffer.\")\n 
return\n\n if not has_header():\n debug(\"This file has no header.\")\n return add_header()\n\n # only if the suffix is supported and we have a method to strip the comment.\n if not ((\"extract_comment_%s\" % SUFFIX) in globals() and suffix_is_supported()):\n return\n\n comment = globals()[\"extract_comment_%s\" % SUFFIX]()\n debug(\"comment: %s\" % str(comment))\n if not comment:\n debug(\"comment is empty\")\n return\n\n comment_dict = {}\n\n if len(comment) < 3:\n # Less than 3 lines of original comment, put them in Description part.\n comment_dict['Description'] = '\\n'.join(comment)\n else:\n comment_dict = read_comment(comment)\n if \"\" in comment_dict:\n del comment_dict[\"\"]\n new_header_dict = read_comment(globals().get(\"%s_header\" % SUFFIX).rstrip().splitlines())\n debug(\"new\")\n debug(set(new_header_dict.keys()))\n debug(set(comment_dict.keys()))\n debug(\"end\")\n if not set(new_header_dict.keys()) == set(comment_dict.keys()):\n return prepend_header(render_header(comment_dict))\n else:\n debug(\"do not modify header since we already have the same header.\")", "def write(self, prefix, path=None):\n\n if path is None:\n path = os.getcwd()\n\n header, source = self.doprint(prefix=prefix)\n\n with open(os.path.join(path, prefix + '.h'), 'w') as f:\n f.write(header)\n\n with open(os.path.join(path, prefix + '.c'), 'w') as f:\n f.write(source)", "def test_write_sff_header(self):\r\n expected = \"\"\"Common Header:\r\n Magic Number:\\t0x2E736666\r\n Version:\\t0001\r\n Index Offset:\\t7773224\r\n Index Length:\\t93365\r\n # of Reads:\\t114\r\n Header Length:\\t440\r\n Key Length:\\t4\r\n # of Flows:\\t400\r\n Flowgram Code:\\t1\r\n Flow Chars:\\tTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACG\r\n Key Sequence:\\tTCAG\r\n\"\"\".split('\\n')\r\n header = {'Version': \"0001\",\r\n 'Magic Number': '0x2E736666',\r\n 'Index Offset': '7773224',\r\n 'Index Length': '93365',\r\n '# of Reads': '114',\r\n 'Header Length': '440',\r\n 'Key Length': '4',\r\n '# of Flows': '400',\r\n 'Flowgram Code': '1',\r\n 'Flow Chars':\r\n 'TACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACG',\r\n 'Key Sequence': 'TCAG'}\r\n\r\n fd, tmp_name = mkstemp(prefix=\"test_write_sff_header\")\r\n close(fd)\r\n fh = open(tmp_name, \"w\")\r\n write_sff_header(header, fh, num=400)\r\n fh.close()\r\n fh = open(tmp_name, \"U\")\r\n lines = list(fh)\r\n remove(tmp_name)\r\n self.assertItemsEqual(lines, map(lambda a: a + \"\\n\", expected))", "def write_header(_metadata, rename_padding=False):\n template = \"\"\"\\\n VERSION {version}\n FIELDS {fields}\n SIZE {size}\n TYPE {type}\n COUNT {count}\n WIDTH {width}\n HEIGHT {height}\n VIEWPOINT {viewpoint}\n POINTS {points}\n DATA {data}\n \"\"\"\n str_metadata = _metadata.copy()\n\n if not rename_padding:\n str_metadata['fields'] = ' '.join(_metadata['fields'])\n else:\n new_fields = 
[]\n for f in _metadata['fields']:\n if f == '_':\n new_fields.append('padding')\n else:\n new_fields.append(f)\n str_metadata['fields'] = ' '.join(new_fields)\n str_metadata['size'] = ' '.join(map(str, _metadata['size']))\n str_metadata['type'] = ' '.join(_metadata['type'])\n str_metadata['count'] = ' '.join(map(str, _metadata['count']))\n str_metadata['width'] = str(_metadata['width'])\n str_metadata['height'] = str(_metadata['height'])\n str_metadata['viewpoint'] = ' '.join(map(str, _metadata['viewpoint']))\n str_metadata['points'] = str(_metadata['points'])\n tmpl = template.format(**str_metadata)\n return tmpl", "def _fix_l1b_header(filename):\n try:\n # First try it with the astropy .to_string() method, as this is the easiest.\n hdr = fits.getheader(filename)\n hdr_str = hdr.tostring()\n except Exception:\n # Read the file manually as bytes until we hit a UnicodeDecodeError, i.e.\n # until we reach the data part. Since astropy version 4.2.1, we can't use\n # the .to_string() method anymore because of FITS header consistency checks\n # that cannot be overridden, and they won't fix it unfortunately. If the\n # input file is a .gz file, we need to unpack it first to the tmp directory.\n temp_dir = tempfile.gettempdir()\n name = Path(filename).name\n is_gz_file = False\n if name.endswith(\".gz\"):\n is_gz_file = True\n with gzip.open(filename, \"r\") as gfile:\n filename = str(Path(temp_dir) / name[:-3])\n with open(filename, \"wb\") as file_out:\n file_out.write(gfile.read())\n hdr_str = \"\"\n with open(filename, \"rb\") as file:\n counter = 1\n while True:\n try:\n this_line = file.read(counter)\n this_str = this_line.decode(\"utf-8\")\n hdr_str += this_str\n counter += 1\n except UnicodeDecodeError:\n break\n if is_gz_file:\n os.remove(filename)\n # Make a list of strings with a length of 80\n hdr_list = [hdr_str[i : i + 80] for i in range(0, len(hdr_str), 80)]\n # Remove all the empty entries\n while \" \" * 80 in hdr_list:\n hdr_list.remove(\" \" * 80)\n hdr_list_new = []\n for count, item in enumerate(hdr_list):\n if count <= len(hdr_list) - 2:\n if (\n hdr_list[count][0:8] != \"CONTINUE\"\n and hdr_list[count + 1][0:8] != \"CONTINUE\"\n ):\n hdr_list_new.append(hdr_list[count])\n else:\n if (\n hdr_list[count][0:8] != \"CONTINUE\"\n and hdr_list[count + 1][0:8] == \"CONTINUE\"\n ):\n ampersand_pos = hdr_list[count].find(\"&\")\n if ampersand_pos != -1:\n new_entry = hdr_list[count][0:ampersand_pos]\n else:\n raise RuntimeError(\n \"There should be an ampersand at the end of a CONTINUE'd keyword.\"\n )\n tmp_count = 1\n while hdr_list[count + tmp_count][0:8] == \"CONTINUE\":\n ampersand_pos = hdr_list[count + tmp_count].find(\"&\")\n if ampersand_pos != -1:\n first_sq_pos = hdr_list[count + tmp_count].find(\"'\")\n if first_sq_pos != -1:\n new_entry = (\n new_entry\n + hdr_list[count + tmp_count][\n first_sq_pos + 1 : ampersand_pos\n ]\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. 
Did not find any.\"\n )\n else:\n # If there is no ampersand at the end anymore, it means the entry ends here.\n # Read from the first to the second single quote in this case.\n first_sq_pos = hdr_list[count + tmp_count].find(\"'\")\n if first_sq_pos != -1:\n second_sq_pos = hdr_list[count + tmp_count][\n first_sq_pos + 1 :\n ].find(\"'\")\n if second_sq_pos != -1:\n new_entry = (\n new_entry\n + hdr_list[count + tmp_count][\n first_sq_pos\n + 1 : second_sq_pos\n + 1\n + first_sq_pos\n ].rstrip()\n + \"'\"\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. Found the first, but not the second.\"\n )\n else:\n raise RuntimeError(\n \"There should be two single quotes after CONTINUE. Did not find any.\"\n )\n tmp_count += 1\n hdr_list_new.append(new_entry)\n else:\n continue\n else:\n # Add END at the end of the header\n hdr_list_new.append(hdr_list[count])\n # Now we stitch together the CONTINUE information correctly,\n # with a \"\\n\" at the end that we use as a separator later on\n # when we convert from a string to an astropy header.\n for count, item in enumerate(hdr_list_new):\n if len(item) > 80:\n this_entry = item[0:78] + \"&'\\n\"\n rest = \"CONTINUE '\" + item[78:]\n while len(rest) > 80:\n this_entry = this_entry + rest[0:78] + \"&'\\n\"\n rest = \"CONTINUE '\" + rest[78:]\n this_entry = this_entry + rest\n hdr_list_new[count] = this_entry\n # Now we should have the correct list of strings. Since we can't convert a list to a\n # FITS header directly, we have to convert it to a string first, separated by \"\\n\".\n hdr_str_new = \"\\n\".join([str(item) for item in hdr_list_new])\n hdr_corr = fits.Header.fromstring(hdr_str_new, sep=\"\\n\")\n return hdr_corr", "def write_file(self):\r\n # -open file for writing\r\n f_fbob = open(self.fn_path, 'w')\r\n\r\n # -write header\r\n f_fbob.write('%s\\n' % (self.heading))\r\n\r\n # -write sections 1 & 2 : NOTE- what about NOPRINT?\r\n f_fbob.write('%10i%10i%10i%10i\\n' % (self.nqfb, self.nqcfb,\r\n self.nqtfb, self.iufbobsv))\r\n f_fbob.write('%10e\\n' % (self.tomultfb)) # check format\r\n\r\n # -write sections 3-5 looping through observations groups\r\n c = 0\r\n for i in range(self.nqfb):\r\n # while (i < self.nqfb):\r\n # write section 3\r\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\r\n self.nqclfb[i]))\r\n\r\n # Loop through observation times for the groups\r\n for j in range(self.nqobfb[i]):\r\n # -write section 4\r\n f_fbob.write(\r\n '{}{:10d}{:10.4g}{}{:10.4g}\\n'.format(self.obsnam[c],\r\n self.irefsp[c],\r\n self.toffset[c], ' ',\r\n self.flwobs[c]))\r\n c += 1 # index variable\r\n\r\n # -write section 5 - NOTE- need to adjust factor for muliple obs same cell\r\n for j in range(abs(self.nqclfb[i])):\r\n if self.nqclfb[\r\n i] < 0: # set factor to 1.0 for all cells in group\r\n self.factor[i, :] = 1.0\r\n f_fbob.write('{:10d}{:10d}{:10d}{}{:10f}\\n'\r\n .format(self.layer[i, j], (self.row[i, j]),\r\n self.column[i, j],\r\n ' ', self.factor[\r\n i, j])) # note- is 10f good enough here?\r\n\r\n f_fbob.close()\r\n #\r\n # swm: BEGIN hack for writing standard file\r\n sfname = self.fn_path # swm:hack\r\n sfname += '_ins' # swm: hack\r\n # write header\r\n f_ins = open(sfname, 'w') # swm: hack for standard file\r\n f_ins.write('jif @\\n') # swm: hack for standard file\r\n f_ins.write('StandardFile 0 1 %s\\n' % (\r\n self.nqtfb)) # swm: hack for standard file\r\n for i in range(0, self.nqtfb):\r\n f_ins.write(\r\n '{}\\n'.format(self.obsnam[i])) # swm: hack for standard file\r\n\r\n 
f_ins.close()\r\n # swm: END hack for writing standard file\r\n\r\n return", "def write_data_to_file(data, filename):\n with open(filename, 'wb') as outfile:\n outfile.write(data)", "def seqIo_newHeader(fName, info):\n d, n = os.path.split(fName)\n if d==[]:d='./'\n tName=fName[:-4] + '_new' + time.strftime(\"%d_%m_%Y\") + fName[-4:]\n sr = seqIo_reader(fName)\n sw = seqIo_writer(tName,info)\n n=sr.header['numFrames']\n for f in range(n):\n I,ts=sr.getFrame(f)\n sw.addFrame(I,ts)\n sr.close()\n sw.close()", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.csv\", \"w\")\n file.write(str(\",\".join(hdata)) + \"\\n\")", "def add_headers(headers, out):\r\n out.write(common.to_csv_line(headers, \"efficient\"))", "def _write_textual_header(self, file):\n length = len(self.textual_file_header)\n # Append spaces to the end if its too short.\n if length < 3200:\n textual_header = self.textual_file_header + b' ' * (3200 - length)\n elif length == 3200:\n textual_header = self.textual_file_header\n # The length must not exceed 3200 byte.\n else:\n msg = 'self.textual_file_header is not allowed to be longer ' + \\\n 'than 3200 bytes'\n raise SEGYWritingError(msg)\n if self.textual_header_encoding.upper() == 'ASCII':\n pass\n elif self.textual_header_encoding.upper() == 'EBCDIC':\n textual_header = \\\n textual_header.decode('ascii').encode('EBCDIC-CP-BE')\n # Should not happen.\n else:\n msg = 'self.textual_header_encoding has to be either ASCII or ' + \\\n 'EBCDIC.'\n raise SEGYWritingError(msg)\n file.write(textual_header)", "def create_header(analysis_outdir, metadata, rg_dict, specimen_dict, logger=default_logger):\n\n rgid = rg_dict[\"ID\"].replace(\".\", \"_\")\n header = \"%s/header-%s.sam\" %(analysis_outdir, rg_dict[\"ID\"])\n header_file = open(header, \"w\")\n header_file.write(\"@HD\\tVN:1.4\\n\")\n PI_STR = \"\"\n if len(rg_dict[\"PI\"]):\n PI_STR=\"PI:%s\\t\" % (rg_dict[\"PI\"])\n header_file.write(\"@RG\\tID:%s:%s\\tCN:%s\\tPL:%s\\tPM:%s\\tLB:%s:%s:%s\\t%sSM:%s\\tPU:%s:%s\\tDT:%s\\n\"\n %(metadata[\"center_name\"], rgid,metadata[\"center_name\"], metadata[\"platform\"],metadata[\"platform_model\"], metadata[\"seqtype\"],\n metadata[\"center_name\"], rg_dict[\"LB\"], PI_STR, metadata[\"aliquot_id\"], rg_dict[\"CN\"], rg_dict[\"PU\"], getUTCDate(rg_dict[\"DT\"])))\n header_file.write(\"@CO\\tdcc_project_code:%s-US\\n\" %metadata[\"disease\"])\n header_file.write(\"@CO\\tsubmitter_donor_id:%s\\n\" %metadata[\"participant_id\"])\n header_file.write(\"@CO\\tsubmitter_specimen_id:%s\\n\" %metadata[\"sample_id\"])\n header_file.write(\"@CO\\tsubmitter_sample_id:%s\\n\" %metadata[\"aliquot_id\"])\n\n if metadata[\"sample_type\"] not in specimen_dict:\n msg = \"sample_type %s not found in specimen mapping\" % metadata[\"sample_type\"]\n logger.error(msg)\n if not FORCE_RUN:\n raise HeaderException(msg)\n\n if \"sample_type\" in metadata and metadata[\"sample_type\"] in specimen_dict:\n (icgc_type, sample_class) = specimen_dict[metadata[\"sample_type\"]]\n else:\n icgc_type = \"unknown\"\n sample_class = \"unknown\"\n\n #Sanity check about use_cntl\n if \"use_cntl\" in metadata:\n if metadata[\"use_cntl\"] == \"N/A\" and sample_class == \"tumour\":\n msg = \"Tumour sample requires use_cntl, set to %s. Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n if sample_class == \"normal\" and metadata[\"use_cntl\"] != \"N/A\":\n msg = \"Normal sample requires N/A use_cntl, set to %s. 
Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n\n header_file.write(\"@CO\\tdcc_specimen_type:%s\\n\" % icgc_type)\n header_file.write(\"@CO\\tuse_cntl:%s\\n\" %(metadata.get(\"use_cntl\", \"NA\")))\n header_file.close()\n return header", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def header(self, version='1.1'):\n fPDF = open(self.filename, 'w')\n fPDF.write('%%PDF-%s\\n' % version)\n fPDF.close()", "def pareHeader(headerFile,Ldontcares=['GData','BiasCoeff','headerFile','y_m_d','TimeZero']):\n reload(chd) # KEN SCOPE ISSUE?\n dHeader = chd.main(['headerFile=' + headerFile])\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d','TimeZero']\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d']\n for k in Ldontcares:\n del dHeader[k]\n dataFile = split(headerFile,'.header')[0] # toss extension\n return dHeader,dataFile", "def writeHeader(self):\n try:\n saveBuf = self.buffer\n self.buffer = []\n ## This export format needs the RDF namespace to be defined, add a\n ## prefix for it if there isn't one yet.\n self.setNamespace(\"rdf\", RDF.NAMESPACE, False)\n self.write(\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n\")\n 
self.writeStartOfStartTag(RDF.NAMESPACE, \"RDF\")\n for name, prefix in self.namespaceTable.iteritems():\n self.writeNewLine()\n self.writeIndent()\n self.write(\"xmlns\")\n if len(prefix) > 0: \n self.write(':')\n self.write(prefix) \n self.write(\"=\\\"\")\n self.write(xmlutil.escapeDoubleQuotedAttValue(name))\n self.write(\"\\\"\")\n self.writeEndOfStartTag()\n self.writeNewLine()\n finally: \n self.headerBuffer = self.buffer\n self.buffer = saveBuf\n self.headerWritten = True", "def write_ldat_header(self, datapath):\n contents = {}\n contents['ldat_type'] = self.ldat_type\n contents['filenametime'] = self.filenametime\n contents['station_id'] = self.station_id\n contents['rcusetup_cmds'] = self.rcusetup_cmds\n contents['beamctl_cmds'] = self.beamctl_cmds\n contents['rspctl_cmds'] = self.rspctl_cmds\n if self.caltabinfos != []:\n contents['caltabinfos'] = self.caltabinfos\n if self.septonconf:\n contents['septonconf'] = self.septonconf\n\n if not self.isLOFARdatatype(self.ldat_type):\n raise ValueError(\"Unknown LOFAR statistic type {}.\"\\\n .format(self.ldat_type))\n xtra = ''\n if self.ldat_type == 'acc':\n xtra = '_512x192x192'\n ldat_header_filename = (self.filenametime + '_' + self.ldat_type\n + xtra + '.h')\n with open(os.path.join(datapath, ldat_header_filename), 'w') as f:\n f.write('# LCU obs settings, header file\\n')\n f.write('# Header version'+' '+self.headerversion+'\\n')\n yaml.dump(contents, f, default_flow_style=False, width=1000)", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n 
h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h", "def write_data(self, filename,\n columns=('Q', 'R', 'dR'),\n header=None):\n if header is None:\n header = \"# %s\\n\"%' '.join(columns)\n with open(filename, 'wb') as fid:\n fid.write(asbytes(header))\n data = np.vstack([getattr(self, c) for c in columns])\n np.savetxt(fid, data.T)", "def writeFile(filename):\n\n with open(filename, \"w\") as f:\n Write.__writeHeader(f)\n\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm[\"position\"]:\n Write.__writeEmptyElement(f)\n i += 1\n\n Write.__writeElement(f, elm)\n i += 1\n\n if elm[\"position\"] == 17:\n i = 0\n Write.__closeRow(f)\n if elm[\"number\"] != 118:\n Write.__openRow(f)\n\n Write.__writeFooter(f)", "def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 
'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)" ]
[ "0.7826132", "0.70860624", "0.6762291", "0.6738056", "0.67234135", "0.67085034", "0.66346943", "0.6610399", "0.6607992", "0.6557405", "0.6481333", "0.6448737", "0.64438635", "0.6427536", "0.64213383", "0.6419421", "0.6363067", "0.63381046", "0.631573", "0.63024944", "0.62987816", "0.62825197", "0.6276485", "0.6192612", "0.61547595", "0.6150514", "0.6146963", "0.6146572", "0.61051464", "0.6102049", "0.60948944", "0.6089645", "0.6073691", "0.60457236", "0.60387933", "0.60334206", "0.60126364", "0.6008319", "0.5996688", "0.59950674", "0.597781", "0.59581906", "0.59571964", "0.5952291", "0.5945057", "0.5940699", "0.59355575", "0.5902031", "0.5888204", "0.58864725", "0.58700943", "0.58379626", "0.5829348", "0.58193904", "0.5796712", "0.5793817", "0.5785162", "0.57752985", "0.57717514", "0.5762646", "0.57623726", "0.574893", "0.5743005", "0.5740703", "0.57223576", "0.57197547", "0.57184005", "0.5711759", "0.57057345", "0.569275", "0.5689941", "0.56674373", "0.56632686", "0.56503", "0.56428796", "0.5628607", "0.5618046", "0.5578458", "0.55777055", "0.557693", "0.55730975", "0.5563545", "0.5560005", "0.55536014", "0.5549462", "0.5547229", "0.5547044", "0.5545725", "0.5543397", "0.5542071", "0.5541613", "0.553011", "0.5507069", "0.5503246", "0.54955405", "0.54842836", "0.5478028", "0.5473285", "0.54722", "0.54685396" ]
0.8375335
0
Retrieve 2D alignment parameters from the header: alpha, tx, ty, mirror, scale
def get_params2D(ima, xform = "xform.align2d"): t = ima.get_attr(xform) d = t.get_params("2D") return d["alpha"],d["tx"],d["ty"],d["mirror"],d["scale"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def _secondary_beam(self, hdr):\n # Called ApSecondaryNano in OpenMIMS\n d = {}\n tmp = unpack(self._bo + 'd 42i 2d', hdr.read(192))\n d['E0W'], d['ES'] = tmp[:2]\n d['ES widths'] = tmp[2:12]\n d['ES heights'] = tuple(tmp[12:22])\n d['AS'] = tmp[22]\n d['AS widths'] = tuple(tmp[23:33])\n d['AS heights'] = tuple(tmp[33:43])\n d['EnS'], d['EnS width'] = tmp[43:]\n return d", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def get_metrics(H):\n theta = np.arctan2(H[0,1], H[0,0])\n scale = H[0,0] / np.cos(theta)\n tx = H[0,2]\n ty = H[1,2]\t\n return tx,ty,theta", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def text_alignment(x, y):\n if x == 0:\n ha = \"center\"\n elif x > 0:\n ha = \"left\"\n else:\n ha = \"right\"\n if y == 0:\n va = \"center\"\n elif y > 0:\n va = \"bottom\"\n else:\n va = \"top\"\n\n return ha, va", "def _decode_header(self):\n #header = self.file_content[0:6]\n log_screen_descr = self.file_content[6:13]\n self.canvas_width = log_screen_descr[0] + (log_screen_descr[1]<<8)\n self.canvas_height = log_screen_descr[2] + (log_screen_descr[3]<<8)\n # is there a global color table? 
(usually yes)\n flags = log_screen_descr[4]\n self.glob_col_table = (flags & 0b10000000) != 0\n\n # determine the number of bits per primary color value\n self.color_resolution = (flags & 0b01110000) >> 4\n self.bits_per_pixel = self.color_resolution + 1\n\n # If the value is 1, then the colors in the global color table are sorted\n # in order of \"decreasing importance,\" which typically means \"decreasing\n # frequency\" in the image\n self.sort_flag = (flags & 0b00001000) != 0\n\n # If this value is N, then the actual table size is 2^(N+1).\n self.glob_col_table_sz = 1 << ((flags & 0b00000111)+1)\n\n self.bg_color_index = log_screen_descr[5]\n self.pix_asp_ratio = log_screen_descr[6]", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def align(self):\n return self[\"align\"]", "def align(self):\n return self[\"align\"]", "def align(self):\n return self[\"align\"]", "def parameters_ui(layout, params):\n\n r = layout.row()\n r.prop(params, \"rotation_axis\")\n\n if 'auto' not in params.rotation_axis.lower():\n r = layout.row()\n text = \"Auto align Foot\"\n r.prop(params, \"auto_align_extremity\", text=text)\n\n r = layout.row()\n r.prop(params, \"segments\")\n\n r = layout.row()\n r.prop(params, \"bbones\")\n\n bone_layers = bpy.context.active_pose_bone.bone.layers[:]\n\n for layer in ['fk', 'tweak']:\n r = layout.row()\n r.prop(params, layer + \"_extra_layers\")\n r.active = params.tweak_extra_layers\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(16, 24):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8, 16):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, 
toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(24, 32):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)", "def getAttributes(self):\n spacing = self.getGridSpacing()\n pan_x = self.pan_pos.x()\n pan_y = self.pan_pos.y()\n\n num_columns = int((self.zoom_factor / spacing) * self.aspect_ratio) + 2\n num_rows = int((self.zoom_factor / spacing)) + 2\n\n # get camera offset (returns pan pos to integer)\n x_offset = pan_x % spacing\n y_offset = pan_y % spacing\n\n return num_columns, num_rows, pan_x, pan_y, x_offset, y_offset, spacing", "def alignsrc(self):\n return self[\"alignsrc\"]", "def alignsrc(self):\n return self[\"alignsrc\"]", "def alignsrc(self):\n return self[\"alignsrc\"]", "def align(self):\n ...", "def wfirst_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, naxis=(4096,4096)):\n #naxis = 2048, 2048\n crpix = naxis[0]/2., naxis[0]/2.\n \n cd = np.array([[ -0.11, 0], [0, 0.11]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n h['BACKGR'] = 0.17+0.49, 'Total, e/s SDT Report A-1'\n h['FILTER'] = 'GRS', 'WFIRST grism'\n h['INSTRUME'] = 'WFIRST'\n h['READN'] = 17, 'SDT report Table 3-3' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def _primary_beam(self, hdr):\n # Called ApPrimaryNano in OpenMIMS\n d = {}\n start_position = hdr.tell()\n d['source'], d['current start'], d['current end'], d['Lduo'], d['L1'] = \\\n unpack(self._bo + '8s 4i', hdr.read(24))\n\n # Each widths list is 10 ints long\n d['Dduo'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['Dduo widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n d['D0'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['D0 widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n d['D1'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['D1 widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n\n # 4 bytes unused\n hdr.seek(4, 1)\n d['raster'], d['oct45'], d['oct90'], d['E0P'], \\\n d['pressure analysis chamber'] = \\\n unpack(self._bo + '4d 32s', hdr.read(64))\n\n d['source'] = self._cleanup_string(d['source'])\n d['pressure analysis chamber'] = self._cleanup_string(d['pressure analysis chamber'])\n\n if self.header['analysis version'] >= 3:\n d['L0'] = unpack(self._bo + 'i', hdr.read(4))[0]\n if self.header['analysis version'] >= 4:\n d['hv cesium'], d['hv duo'] = unpack(self._bo + '2i', hdr.read(8))\n # DCs not in OpenMIMS; only in certain release/version?\n d['Dcs'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['Dcs widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n\n # skip bytes until total read in this function is 552\n # OpenMIMS: size_Ap_primary_nano = 552\n # Newer versions have rest filled with \\xCC continuation bytes, but\n # older versions have null-bytes, but not all 
bytes are null!!\n # The numbers do not seem to represent anything, though, so can be skipped.\n hdr.seek(start_position + 552)\n return d", "def _getAttributes(self):\n self._params = {}\n if self.interp is not None:\n # Initialize interpolation function :\n self['x'] = np.arange(0, self.pixels, 1)\n self['y'] = np.arange(0, self.pixels, 1)\n # Define newaxis :\n self['xnew'] = np.arange(0, self.pixels, self.interp)\n self['ynew'] = np.arange(0, self.pixels, self.interp)\n self['csize'] = len(self['xnew'])\n else:\n self['csize'] = self.pixels\n # Variables :\n l = int(self['csize'] / 2)\n self['l'] = l\n y, x = np.ogrid[-l:l, -l:l]\n disc = x**2 + y**2\n self['mask'] = disc < l**2\n self['nmask'] = np.invert(self['mask'])\n # self['image'] = np.tile(self.bgcolor[np.newaxis, ...], (2*l, 2*l, 1))", "def generate_anchors_info():\n original_height, original_width = 512, 640\n input_anchor = Anchor(\n min_level=2,\n max_level=6,\n num_scales=1,\n aspect_ratios=[1.0, 2.0, 0.5],\n anchor_size=8,\n image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))\n anchor_boxes = input_anchor.multilevel_boxes\n for key in anchor_boxes:\n anchor_boxes[key] = anchor_boxes[key].numpy()\n\n scale = min(_IMAGE_SIZE.value / original_height,\n _IMAGE_SIZE.value / original_width)\n image_info = np.array([[[original_height, original_width],\n [_IMAGE_SIZE.value, _IMAGE_SIZE.value],\n [scale, scale], [0, 0]]])\n\n return anchor_boxes, image_info", "def _read_next_alignment(self, stream):", "def get_params(img, output_size):\n c, h, w = img.shape\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = (h - th)//2\n j = (w - tw)//2\n return i, j, th, tw", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def get_calib_from_header(header):\n\n prefix = 'HIERARCH GAMSE WLCALIB '\n\n xorder = header[prefix+'XORDER']\n yorder = header[prefix+'YORDER']\n\n coeff = np.zeros((yorder+1, xorder+1))\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n coeff[j,i] = header[prefix+'COEFF {:d} {:d}'.format(j, i)]\n\n calib = {\n 'coeff': coeff,\n 'npixel': header[prefix+'NPIXEL'],\n 'k': header[prefix+'K'],\n 'offset': header[prefix+'OFFSET'],\n 'std': header[prefix+'STDDEV'],\n 'nuse': header[prefix+'NUSE'],\n 'ntot': header[prefix+'NTOT'],\n# 'identlist': calibwindow.identlist,\n 'window_size': header[prefix+'WINDOW_SIZE'],\n 'xorder': xorder,\n 'yorder': yorder,\n 'maxiter': header[prefix+'MAXITER'],\n 'clipping': header[prefix+'CLIPPING'],\n 'q_threshold': header[prefix+'Q_THRESHOLD'],\n 'direction': header[prefix+'DIRECTION'],\n }\n return calib", "def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned NoneType. This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = 
header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)", "def overlay_alignment(self):\n return self._overlay_alignment", "def get_alignment(self, names=None):\n names = names or self.experiments.keys()\n return dict([(e, self.experiments[e]['align']) \\\n for e in names if 'align' in self.experiments[e]])", "def get_params(pic, output_size):\n\n c, w, h = pic.shape\n th, tw = output_size\n\n i = int(round((h - th) / 2.))\n j = int(round((w - tw) / 2.))\n\n return i, j, th, tw", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def getParameters(self):\n\t\td = AdaptiveBatAlgorithm.getParameters(self)\n\t\td.update({\n\t\t\t'A_l': self.A_l,\n\t\t\t'A_u': self.A_u,\n\t\t\t'r_l': self.r_l,\n\t\t\t'r_u': self.r_u,\n\t\t\t'tao_1': self.tao_1,\n\t\t\t'tao_2': self.tao_2\n\t\t})\n\t\treturn d", "def get_params(pic, output_size):\n\n w, h, c = pic.shape\n th, tw = output_size\n\n i = int(round((h - th) / 2.))\n j = int(round((w - tw) / 2.))\n\n return i, j, th, tw", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.poly_contact_offset = vector(0.5*contact.poly.width,0.5*contact.poly.height)\n\n # M1/M2 routing pitch is based on contacted pitch\n self.m1_pitch = max(contact.m1m2.width,contact.m1m2.height) + max(self.m1_space,self.m2_space)\n self.m2_pitch = max(contact.m2m3.width,contact.m2m3.height) + max(self.m2_space,self.m3_space)\n \n # This corrects the offset pitch difference between M2 and M1\n self.offset_fix = vector(0.5*(self.m2_width-self.m1_width),0)\n\n # delay chain will be rotated 90, so move it over a width\n # we move it up a inv height just for some routing room\n self.rbl_inv_offset = vector(self.delay_chain.height, self.inv.width)\n # access TX goes right on top of inverter, leave space for an inverter which is\n # about the same as a TX. We'll need to add rails though.\n self.access_tx_offset = vector(1.25*self.inv.height,self.rbl_inv_offset.y) + vector(0,2.5*self.inv.width)\n self.delay_chain_offset = self.rbl_inv_offset + vector(0,4*self.inv.width)\n\n # Replica bitline and such are not rotated, but they must be placed far enough\n # away from the delay chain/inverter with space for three M2 tracks\n self.bitcell_offset = self.rbl_inv_offset + vector(2*self.m2_pitch, 0) + vector(0, self.bitcell.height + self.inv.width)\n\n self.rbl_offset = self.bitcell_offset\n\n \n self.height = self.rbl_offset.y + self.rbl.height + self.m2_pitch\n self.width = self.rbl_offset.x + self.bitcell.width", "def _target_xy(header, outwcs):\n tgt_x, tgt_y = None, None\n tgt_ra = header.get('TGTRA', None)\n tgt_dec = header.get('TGTDEC', None)\n if tgt_ra is not None and tgt_dec is not None \\\n and not np.allclose([tgt_ra, tgt_dec], 0):\n # convert from hours to degrees\n tgt_ra *= 15.0\n if outwcs.wcs.naxis == 2:\n tgt_x, tgt_y = \\\n outwcs.wcs_world2pix(tgt_ra, tgt_dec, 0)\n else:\n tgt_w, tgt_y, tgt_x = \\\n outwcs.wcs_world2pix(0, tgt_dec, tgt_ra, 0)\n return tgt_x, tgt_y", "def CalcAtmTransmissionForImage(img, header='', chanInfo='', airmass=1.5,pwv=-1, \n spectralaxis=-1, \n value='transmission', P=-1, H=-1, \n T=-1, altitude=-1):\n if (header == ''):\n print \"imhead\", # the comma prevents the newline so that ...10...20 will be on same line\n header = imhead(img,mode='list')\n if (type(header) != dict):\n # Input was a spectrum rather than an image\n if (chanInfo[1] < 60e9):\n telescopeName = 'ALMA'\n else:\n telescopeName = 'VLA'\n else:\n telescopeName = header['telescope']\n # this will not match up with the plot, which uses numberOfChannelsInCube\n# freqs = getFreqsForImage(img, header, spectralaxis)\n freqs = np.linspace(chanInfo[1]*1e-9,chanInfo[2]*1e-9,chanInfo[0])\n# print \"freqs: %f-%f\" % (freqs[0], freqs[-1])\n numchan = len(freqs)\n lsrkwidth = (chanInfo[2] - chanInfo[1])/(numchan-1)\n result = cubeLSRKToTopo(img, nchan=numchan, f0=chanInfo[1], f1=chanInfo[2], chanwidth=lsrkwidth)\n if (result is None):\n topofreqs = freqs\n else:\n topoWidth = (result[1]-result[0])/(numchan-1)\n topofreqs = np.linspace(result[0], result[1], chanInfo[0]) * 1e-9\n 
casalogPost(\"Converted LSRK range (%f-%f) to TOPO (%f-%f) over %d channels\" % (chanInfo[1]*1e-9, chanInfo[2]*1e-9,topofreqs[0],topofreqs[-1],numchan))\n P0 = 1000.0 # mbar\n H0 = 20.0 # percent\n T0 = 273.0 # Kelvin\n if (telescopeName.find('ALMA') >= 0 or telescopeName.find('ACA') >= 0):\n pwv0 = 1.0 \n P0 = 563.0\n H0 = 20.0\n T0 = 273.0\n altitude0 = 5059\n elif (telescopeName.find('VLA') >= 0):\n P0 = 786.0\n pwv0 = 5.0 \n altitude0 = 2124\n else:\n pwv0 = 10.0 \n altitude0 = 0\n if (pwv < 0):\n pwv = pwv0\n if (T < 0):\n T = T0\n if (H < 0):\n H = H0\n if (P < 0):\n P = P0\n if (altitude < 0):\n altitude = altitude0\n tropical = 1\n midLatitudeSummer = 2\n midLatitudeWinter = 3\n# print \"image bandwidth = %f GHz\" % (np.max(freqs)-np.min(freqs))\n reffreq = np.mean(topofreqs)\n numchanModel = numchan*1\n chansepModel = (topofreqs[-1]-topofreqs[0])/(numchanModel-1)\n# print \"regridded bandwidth=%f GHz, chansep=%f, reffreq=%f\" % (np.max(topofreqs)-np.min(topofreqs), chansepModel, reffreq)\n nbands = 1\n myqa = createCasaTool(qatool)\n fCenter = create_casa_quantity(myqa, reffreq, 'GHz')\n fResolution = create_casa_quantity(myqa, chansepModel, 'GHz')\n fWidth = create_casa_quantity(myqa, numchanModel*chansepModel, 'GHz')\n myat = casac.atmosphere()\n myat.initAtmProfile(humidity=H, temperature=create_casa_quantity(myqa,T,\"K\"),\n altitude=create_casa_quantity(myqa,altitude,\"m\"),\n pressure=create_casa_quantity(myqa,P,'mbar'),atmType=midLatitudeWinter)\n myat.initSpectralWindow(nbands, fCenter, fWidth, fResolution)\n myat.setUserWH2O(create_casa_quantity(myqa, pwv, 'mm'))\n# myat.setAirMass() # This does not affect the opacity, but it does effect TebbSky, so do it manually.\n myqa.done()\n\n dry = np.array(myat.getDryOpacitySpec(0)[1])\n wet = np.array(myat.getWetOpacitySpec(0)[1]['value'])\n TebbSky = myat.getTebbSkySpec(spwid=0)[1]['value']\n # readback the values to be sure they got set\n \n rf = myat.getRefFreq()['value']\n cs = myat.getChanSep()['value']\n if (myat.getRefFreq()['unit'] != 'GHz'):\n casalogPost(\"There is a unit mismatch for refFreq in the atm code.\")\n if (myat.getChanSep()['unit'] != 'MHz'):\n casalogPost(\"There is a unit mismatch for chanSep in the atm code.\")\n numchanModel = myat.getNumChan()\n freq0 = myat.getChanFreq(0)['value']\n freq1 = myat.getChanFreq(numchanModel-1)['value']\n# print \"atm returned bandwidth = %f GHz = %f to %f \" % (freq1-freq0, freq0, freq1)\n newfreqs = np.linspace(freqs[0], freqs[-1], numchanModel) # fix for SCOPS-4815\n# print \"freqs: %f-%f newfreqs: %f-%f\" % (freqs[0], freqs[-1], newfreqs[0], newfreqs[-1])\n transmission = np.exp(-airmass*(wet+dry))\n TebbSky *= (1-np.exp(-airmass*(wet+dry)))/(1-np.exp(-wet-dry))\n if value=='transmission':\n values = transmission\n else:\n values = TebbSky\n del myat\n return(newfreqs, values)", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, 
\"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def readaccl(self):\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_H_A)\r\n\t\t\r\n\t\txAccl = data1 * 256 + data0\r\n\t\tif xAccl > 32767 :\r\n\t\t\txAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Y_L_M(0x2A), 2 bytes\r\n\t\tY-Axis Mag LSB, Y-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_H_A)\r\n\t\t\r\n\t\tyAccl = data1 * 256 + data0\r\n\t\tif yAccl > 32767 :\r\n\t\t\tyAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Z_L_M(0x2C), 2 bytes\r\n\t\tZ-Axis Mag LSB, Z-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_H_A)\r\n\t\t\r\n\t\tzAccl = data1 * 256 + data0\r\n\t\tif zAccl > 32767 :\r\n\t\t\tzAccl -= 65536\r\n\t\t\r\n\t\treturn {'x' : xAccl, 'y' : yAccl, 'z' : zAccl}", "def get_transform_from_dh(a, alpha, d, theta):\n pass", "def __prepare_dh_params(self):\n self.alpha = symbols('alpha0:' + str(self.joint_count))\n self.a = symbols('a0:' + str(self.joint_count))\n self.q = symbols('q1:' + str(self.joint_count + 1))\n self.d = symbols('d1:' + str(self.joint_count + 1))", "def getParams(self):\n return self.W, self.b", "def lattice_settings(self):\n return (self.a, self.b, self.c,\n self.alpha*radians, self.beta*radians, self.gamma*radians)", "def get_params(self):\n return self.w, self.b", "def prob_t_a_given_s(self, alignment_info):\n ...", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def _create_hdr_obj(self, pix_len, pix_scale):\n hdr = astropy.io.fits.Header()\n hdr['NAXIS'] = 2\n hdr['NAXIS1'] = pix_len\n hdr['NAXIS2'] = pix_len\n hdr['CTYPE1'] = 'RA---TAN'\n hdr['CRVAL1'] = float(self.ra_ctr)\n hdr['CRPIX1'] = (pix_len / 2.) * 1.\n hdr['CDELT1'] = -1.0 * pix_scale\n hdr['CTYPE2'] = 'DEC--TAN'\n hdr['CRVAL2'] = float(self.dec_ctr)\n hdr['CRPIX2'] = (pix_len / 2.) * 1.\n hdr['CDELT2'] = pix_scale\n hdr['EQUINOX'] = 2000\n return hdr", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. 
This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def getTranslationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in separate array - easier for optimization\n nprojs = len(TiltSeries_._ProjectionList._list)\n self._alignmentTransX = nprojs * [0.]\n self._alignmentTransY = nprojs * [0.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentTransX[kk] = proj.getAlignmentTransX()\n self._alignmentTransY[kk] = proj.getAlignmentTransY()\n return self._alignmentTransX, self._alignmentTransY", "def get_preamble_z(self):\n a = PhysicalLayer.get_preamble()\n return 2,np.array([z for z in a['symb'][0:31] for _ in range(self._sps)])", "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def get_material_info(TABLE_info):\n \"\"\"\n 1 Get info from TABLE_info.\n \"\"\"\n width = TABLE_info[0]\n height = TABLE_info[1]\n t_m = TABLE_info[2]\n\n \"\"\"\n 2 Get material info.\n \"\"\"\n z_m = 3 * t_m\n\n m_width = rs.GetInteger(\"Put the width of material\", z_m, None, None)\n\n t_sen = rs.GetReal(\"Put Int(mm): Thickness of material to cut SEN.\", t_m / 2, None, None)\n\n x_m1 = m_width\n x_m2 = height - x_m1\n x_m3 = x_m2\n x_m4 = x_m1\n\n y_m2 = m_width\n y_m3 = y_m2\n y_m1 = width - (y_m2 + y_m3)\n y_m4 = y_m1\n\n\n # material1\n m1_p0 = (x_m3, y_m3)\n m1_p1 = (x_m3, y_m3 + y_m1)\n m1_p2 = (x_m3 + 
x_m1, y_m3 + y_m1)\n m1_p3 = (x_m3 + x_m1, y_m3)\n m1_points = [m1_p0, m1_p1, m1_p2, m1_p3]\n\n m1_info = [x_m1, y_m1, z_m, m1_points, t_sen]\n\n # material2\n m2_p0 = (0, width - y_m2)\n m2_p1 = (0, width)\n m2_p2 = (height - x_m1, width)\n m2_p3 = (height - x_m1, width - y_m2)\n m2_points = [m2_p0, m2_p1, m2_p2, m2_p3]\n\n m2_info = [x_m2, y_m2, z_m, m2_points, t_sen]\n\n # material3\n m3_p0 = (0, 0)\n m3_p1 = (0, y_m3)\n m3_p2 = (x_m3, y_m3)\n m3_p3 = (x_m3, 0)\n m3_points = [m3_p0, m3_p1, m3_p2, m3_p3]\n\n m3_info = [x_m3, y_m3, z_m, m3_points, t_sen]\n\n # material4\n m4_p0 = (0, y_m3)\n m4_p1 = (0, y_m3 + y_m4)\n m4_p2 = (-x_m4, y_m3 + y_m4)\n m4_p3 = (-x_m4, y_m3)\n m4_points = [m4_p0, m4_p1, m4_p2, m4_p3]\n\n m4_info = [x_m4, y_m4, z_m, m4_points, t_sen]\n\n return m1_info, m2_info, m3_info, m4_info", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def __read_header(self):\n header = self.__file_object.readline()\n header_string = header.decode('utf-8')\n print(header_string)\n # Ignore first letter\n self.frame_width = int(re.findall('W\\d+', header_string)[0][1:])\n self.frame_height = int(re.findall('H\\d+', header_string)[0][1:])\n self.frame_rate = re.findall('F\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual frame rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]\n self.frame_rate = round(tokens[0] / tokens[1], 1)\n\n self.__pixel_aspect_ratio = re.findall('A\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual pixel aspect ratio rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]\n self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)\n\n # Don't ignore for interlacing\n self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]\n\n # Ignore first 'FRAME\\n' terminator so the file object points to the first byte of raw data of the first frame\n self.__file_object.readline()\n\n self.__first_frame_raw_data_position = self.__file_object.tell()\n\n self.determine_color_space_by_frame_size()\n\n # Restore\n self.__file_object.seek(self.__first_frame_raw_data_position)\n\n return header\n\n # Color space parameter is missing?\n print('FourCC:\\t\\t', header_string[:4])\n print('Input file:\\t', self.__input_file_path)\n print('Frame size:\\t', f'{self.frame_width}x{self.frame_height}')\n print('Frame rate:\\t', f'{self.frame_rate} FPS')\n print('Aspect Ratio:\\t', self.__pixel_aspect_ratio)\n print('Color space\\t', self.color_space)\n print('Frame size (raw data):', self.__frame_raw_data_size)\n print('Position of first raw:', self.__first_frame_raw_data_position)", "def _decodeMetadata(\n mdAtributesBytes, mdCalibrationBytes, mdTextBytes, size, Nrecords):\n ### TODO: probably XML or JSON decoder should work here?\n mdkeysXY = {\n 'Nx': b'\\x00W\\x00i\\x00d\\x00t\\x00h\\x00\\x00\\x00',\n 'NxBytes': b'\\x00W\\x00i\\x00d\\x00t\\x00h\\x00B\\x00y\\x00t\\x00e\\x00s\\x00\\x00\\x00',\n 'Ny': b'\\x00H\\x00e\\x00i\\x00g\\x00h\\x00t\\x00\\x00\\x00',}\n imgMD = {}\n for key, val in mdkeysXY.items():\n ind = mdAtributesBytes.index(val)\n start = ind + len(val)\n a = mdAtributesBytes[start: start + 2]\n imgMD[key] = frombuffer(a, 'int16')[0]\n mdkeysZ = {\n 'dxy': b'\\rd\\x00C\\x00a\\x00l\\x00i\\x00b\\x00r\\x00a\\x00t\\x00i\\x00o\\x00n\\x00\\x00\\x00',\n }\n for key, val in mdkeysZ.items():\n ind = mdCalibrationBytes.index(val)\n 
start = ind + len(val)\n if key == 'dxy':\n a = mdCalibrationBytes[start: start + 8]\n imgMD[key] = frombuffer(a, 'float64')[0]\n mdkeysText = {\n 'Nt': b'\\x00T\\x00i\\x00m\\x00e\\x00 \\x00L\\x00o\\x00o\\x00p\\x00:\\x00 '}\n ind = mdTextBytes.index(\n b'\\x00M\\x00e\\x00t\\x00a\\x00d\\x00a\\x00t\\x00a\\x00:')\n metadataText = mdTextBytes[ind:][1::2]\n ind = metadataText.index(b'\\x00\\x08')\n metadataText = metadataText[:ind]\n lines = metadataText.split(b'\\r\\n')\n imgMD['dz'] = 1.0\n for n, line in enumerate(lines):\n if b'Z Stack Loop:' in line and b'- Step:' in lines[n+1]:\n sline = lines[n+1].split(b' ')\n imgMD['dz'] = float64(sline[2])\n imgMD['dz units'] = sline[3]\n ind = mdTextBytes.index(mdkeysText['Nt'])\n di = len(mdkeysText['Nt'])\n val = mdTextBytes[ind + di: ind + di + 8][1::2].split(b'\\r')[0]\n imgMD['Nt'] = int(val)\n imgMD['Nz'] = int(Nrecords/imgMD['Nt'])\n imgMD['raw'] = metadataText\n imgMD['fileSize'] = size\n return imgMD", "def _getMatchAlignmentHeadingOnly(self):\n return self._alignmentIsHeadingOnly.value", "def get_aligned_frames(pipeline, align):\n frames = pipeline.wait_for_frames()\n print('frames')\n\n # Align the depth frame to color frame\n aligned_frames = align.process(frames)\n\n # Get aligned frames\n aligned_depth_frame = aligned_frames.get_depth_frame()\n color_frame = aligned_frames.get_color_frame()\n\n return color_frame, aligned_depth_frame", "def _generate_comp_attribs(self, center_lines: List[ndarray],\n text_mask: ndarray, center_region_mask: ndarray,\n top_height_map: ndarray,\n bot_height_map: ndarray, sin_map: ndarray,\n cos_map: ndarray) -> ndarray:\n\n assert isinstance(center_lines, list)\n assert (text_mask.shape == center_region_mask.shape ==\n top_height_map.shape == bot_height_map.shape == sin_map.shape\n == cos_map.shape)\n\n center_lines_mask = np.zeros_like(center_region_mask)\n cv2.polylines(center_lines_mask, center_lines, 0, 1, 1)\n center_lines_mask = center_lines_mask * center_region_mask\n comp_centers = np.argwhere(center_lines_mask > 0)\n\n y = comp_centers[:, 0]\n x = comp_centers[:, 1]\n\n top_height = top_height_map[y, x].reshape(\n (-1, 1)) * self.comp_shrink_ratio\n bot_height = bot_height_map[y, x].reshape(\n (-1, 1)) * self.comp_shrink_ratio\n sin = sin_map[y, x].reshape((-1, 1))\n cos = cos_map[y, x].reshape((-1, 1))\n\n top_mid_points = comp_centers + np.hstack(\n [top_height * sin, top_height * cos])\n bot_mid_points = comp_centers - np.hstack(\n [bot_height * sin, bot_height * cos])\n\n width = (top_height + bot_height) * self.comp_w_h_ratio\n width = np.clip(width, self.min_width, self.max_width)\n r = width / 2\n\n tl = top_mid_points[:, ::-1] - np.hstack([-r * sin, r * cos])\n tr = top_mid_points[:, ::-1] + np.hstack([-r * sin, r * cos])\n br = bot_mid_points[:, ::-1] + np.hstack([-r * sin, r * cos])\n bl = bot_mid_points[:, ::-1] - np.hstack([-r * sin, r * cos])\n text_comps = np.hstack([tl, tr, br, bl]).astype(np.float32)\n\n score = np.ones((text_comps.shape[0], 1), dtype=np.float32)\n text_comps = np.hstack([text_comps, score])\n if la_nms is None:\n raise ImportError('lanms-neo is not installed, '\n 'please run \"pip install lanms-neo==1.0.2\".')\n text_comps = la_nms(text_comps, self.text_comp_nms_thr)\n\n if text_comps.shape[0] >= 1:\n img_h, img_w = center_region_mask.shape\n text_comps[:, 0:8:2] = np.clip(text_comps[:, 0:8:2], 0, img_w - 1)\n text_comps[:, 1:8:2] = np.clip(text_comps[:, 1:8:2], 0, img_h - 1)\n\n comp_centers = np.mean(\n text_comps[:, 0:8].reshape((-1, 4, 2)),\n 
axis=1).astype(np.int32)\n x = comp_centers[:, 0]\n y = comp_centers[:, 1]\n\n height = (top_height_map[y, x] + bot_height_map[y, x]).reshape(\n (-1, 1))\n width = np.clip(height * self.comp_w_h_ratio, self.min_width,\n self.max_width)\n\n cos = cos_map[y, x].reshape((-1, 1))\n sin = sin_map[y, x].reshape((-1, 1))\n\n _, comp_label_mask = cv2.connectedComponents(\n center_region_mask, connectivity=8)\n comp_labels = comp_label_mask[y, x].reshape(\n (-1, 1)).astype(np.float32)\n\n x = x.reshape((-1, 1)).astype(np.float32)\n y = y.reshape((-1, 1)).astype(np.float32)\n comp_attribs = np.hstack(\n [x, y, height, width, cos, sin, comp_labels])\n comp_attribs = self._jitter_comp_attribs(comp_attribs,\n self.jitter_level)\n\n if comp_attribs.shape[0] < self.num_min_comps:\n num_rand_comps = self.num_min_comps - comp_attribs.shape[0]\n rand_comp_attribs = self._generate_rand_comp_attribs(\n num_rand_comps, 1 - text_mask)\n comp_attribs = np.vstack([comp_attribs, rand_comp_attribs])\n else:\n comp_attribs = self._generate_rand_comp_attribs(\n self.num_min_comps, 1 - text_mask)\n\n num_comps = (\n np.ones((comp_attribs.shape[0], 1), dtype=np.float32) *\n comp_attribs.shape[0])\n comp_attribs = np.hstack([num_comps, comp_attribs])\n\n if comp_attribs.shape[0] > self.num_max_comps:\n comp_attribs = comp_attribs[:self.num_max_comps, :]\n comp_attribs[:, 0] = self.num_max_comps\n\n pad_comp_attribs = np.zeros(\n (self.num_max_comps, comp_attribs.shape[1]), dtype=np.float32)\n pad_comp_attribs[:comp_attribs.shape[0], :] = comp_attribs\n\n return pad_comp_attribs", "def _whctrs(anchor):\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr", "def _whctrs(anchor):\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr", "def stereo_score(alignment):\n #dictionary with properties for each residue\n dic_prop = {'I': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'L': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'V': [1, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'C': [1, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n 'A': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'G': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'M': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'F': [1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'W': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'H': [1, 1, 0, 0, 0, 0, 1, 1, 0, 1],\n 'K': [1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'R': [0, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'E': [0, 1, 0, 0, 0, 0, 0, 0, 1, 1],\n 'Q': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'D': [0, 1, 1, 0, 0, 0, 0, 0, 1, 1],\n 'N': [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'S': [0, 1, 1, 0, 1, 0, 0, 0, 0, 0],\n 'T': [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'P': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n 'B': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Z': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n '-': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n score_list = []\n for i in range(0, alignment.get_alignment_length()):\n #extract the unique residues in the alignment\n column = ''.join(set(alignment[:, i]))\n stereo_list = []\n #loop through each residue\n for res in range(0, len(column)):\n #replace the residue with list of properties\n residue = column[res]\n #append the properties list to a\n stereo_prop = dic_prop.get(residue)\n stereo_list.append(stereo_prop)\n #number of common properties\n count_stereo = sum(len(set(i)) == 1 for i in zip(*stereo_list))\n #add the number of properties to a list\n 
score_list.append(count_stereo)\n score_list_final = [float(i*0.1) for i in score_list]\n return score_list_final", "def _whbtms(anchor):\n\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_btm = anchor[0] + 0.5 * (w - 1)\n y_btm = anchor[1] + 1.0 * (h - 1)\n return w, h, x_btm, y_btm", "def init_pos_parms(self):\n\n ## init_pos_parms()\n parms = {}\n\n # length axis\n parms['length_attribute_road'] = ('top', 'cl', 'bottom')[self.segment]\n parms['length_attribute_artifact'] = ('top', 'cl', 'bottom')[self.pos_length]\n\n # width axis\n if type(self.pos_width) is int:\n parms['width_road_rect'] = self.road.lanes[self.pos_width]\n parms['width_attribute_road'] = 'cw'\n parms['width_attribute_artifact'] = 'cw'\n else:\n parms['width_road_rect'] = self.road\n pos_parms = {'l': ('left', 'right'),\n 'c': ('cw', 'cw'),\n 'r': ('right', 'left')}[self.pos_width]\n parms['width_attribute_road'] = pos_parms[0]\n parms['width_attribute_artifact'] = pos_parms[1]\n return parms", "def _format_outputs(self, x, img_h, img_w):\n\n b, _ = x.shape\n h, w = 7, 7\n # B * (H * W * (num_anchors * 5 + num_classes)) --> B * H * W * (num_anchors * 5 + num_classes)\n x = x.view(b, h, w, self.num_anchors * 5 + self.num_classes)\n # Classification scores\n b_scores = x[..., -self.num_classes:]\n # Repeat for anchors to keep compatibility across YOLO versions\n b_scores = F.softmax(b_scores.unsqueeze(3), dim=-1)\n # B * H * W * (num_anchors * 5 + num_classes) --> B * H * W * num_anchors * 5\n x = x[..., :self.num_anchors * 5].view(b, h, w, self.num_anchors, 5)\n # Cell offset\n c_x = torch.arange(w, dtype=torch.float, device=x.device)\n c_y = torch.arange(h, dtype=torch.float, device=x.device)\n # Box coordinates\n b_x = (torch.sigmoid(x[..., 0]) + c_x.view(1, 1, -1, 1)) / w\n b_y = (torch.sigmoid(x[..., 1]) + c_y.view(1, -1, 1, 1)) / h\n b_w = torch.sigmoid(x[..., 2])\n b_h = torch.sigmoid(x[..., 3])\n # B * H * W * num_anchors * 4\n b_coords = torch.stack((b_x, b_y, b_w, b_h), dim=4)\n # Objectness\n b_o = torch.sigmoid(x[..., 4])\n\n return b_coords, b_o, b_scores", "def _whctrs(anchor):\n\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr", "def _whctrs(anchor):\n\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr", "def aligned_face(self):\n return self.aligned[\"face\"]", "def get_affine_reg_params(self):\n affine_params = [\n self.affine_reg_pyramid_steps,\n self.affine_reg_used_pyramid_steps,\n ]\n return affine_params", "def read_conversions(db):\n mpart,Lbox,rsdfac,acheck = None,None,None,None\n with open(db+\"Header/attr-v2\",\"r\") as ff:\n for line in ff.readlines():\n mm = re.search(\"MassTable.*\\#HUMANE\\s+\\[\\s*0\\s+(\\d*\\.\\d*)\\s*0+\\s+0\\s+0\\s+0\\s+\\]\",line)\n if mm != None:\n mpart = float(mm.group(1)) * 1e10\n mm = re.search(\"BoxSize.*\\#HUMANE\\s+\\[\\s*(\\d+)\\s*\\]\",line)\n if mm != None:\n Lbox = float(mm.group(1))\n mm = re.search(\"RSDFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n rsdfac = float(mm.group(1))\n mm = re.search(\"ScalingFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n acheck = float(mm.group(1))\n if (mpart is None)|(Lbox is None)|(rsdfac is None)|(acheck is None):\n print(mpart,Lbox,rsdfac,acheck)\n raise RuntimeError(\"Unable to get conversions from attr-v2.\")\n return 
mpart, Lbox, rsdfac, acheck\n #", "def _format_outputs(self, x, img_h, img_w):\n\n b, _, h, w = x.shape\n # B * C * H * W --> B * H * W * num_anchors * (5 + num_classes)\n x = x.view(b, self.num_anchors, 5 + self.num_classes, h, w).permute(0, 3, 4, 1, 2)\n # Cell offset\n c_x = torch.arange(w, dtype=torch.float, device=x.device)\n c_y = torch.arange(h, dtype=torch.float, device=x.device)\n # Box coordinates\n b_x = (torch.sigmoid(x[..., 0]) + c_x.view(1, 1, -1, 1)) / w\n b_y = (torch.sigmoid(x[..., 1]) + c_y.view(1, -1, 1, 1)) / h\n b_w = self.anchors[:, 0].view(1, 1, 1, -1) / w * torch.exp(x[..., 2])\n b_h = self.anchors[:, 1].view(1, 1, 1, -1) / h * torch.exp(x[..., 3])\n # B * H * W * num_anchors * 4\n b_coords = torch.stack((b_x, b_y, b_w, b_h), dim=4)\n # Objectness\n b_o = torch.sigmoid(x[..., 4])\n # Classification scores\n b_scores = F.softmax(x[..., 5:], dim=-1)\n\n return b_coords, b_o, b_scores", "def fieldCenter(self):\n if self.ra0 is None:\n self.ra0 = reduce(lambda x, y: x + y, [src.pos.ra for src in self.sources]) / len(\n self.sources) if self.sources else 0\n if self.dec0 is None:\n self.dec0 = reduce(lambda x, y: x + y, [src.pos.dec for src in self.sources]) / len(\n self.sources) if self.sources else 0\n return self.ra0, self.dec0", "def lattice_parameters(self):\n return self.a, self.b, self.c, self.alpha, self.beta, self.gamma", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. / 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def get_angle_info(self):\n return", "def is_aligned_2d(self, p_list1, p_list2, p_left_key, p_top_key, p_width_key, p_height_key):\n # check if terms align vertically\n IVA = self.is_align_1D(p_list1, p_list2, p_left_key, p_width_key)\n # check if terms align horizonally\n IHA = self.is_align_1D(p_list1, p_list2, p_top_key, p_height_key)\n if IVA == True:\n # if terms align vertically get direction and distance\n return self.get_vertical_align_direction(p_list1, p_list2, p_top_key, p_height_key)\n elif IHA == True:\n # if terms align vertically get direction and distance\n return self.get_horizontal_align_direction(p_list1, p_list2, p_left_key, p_width_key)\n else: return None", "def getDustAlignment(grid=None, ppar=None):\n alpar = {}\n\n alvec = DiskEqs.eqDustAlignment(ppar['crd_sys'],grid.x,grid.y,grid.z, ppar['altype'], alpar)\n\n return alvec", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def HeaderBmpinfo( self, bWidth, bHeight, wBitCount, imgdata, bColorCount ):\n ## (4bytes)biSize - (4bytes)biWidth - (4bytes)biHeight - (2bytes)biPlanes - (2bytes)biBitCount -\n ## - (4bytes)biCompression - (4bytes)biSizeImage -\n ## - (4bytes)biXPelsPerMeter - (4bytes)biYPelsPerMeter - (4bytes)biClrused - (4bytes)biClrImportant.\n biSize = calcsize('3I2H2I2i2I')\n biWidth = bWidth\n biHeight = bHeight * 2 # include the mask height\n biPlanes = 1 # color planes must be 1 \n biBitCount = 
wBitCount # 1, 2, 4, 8, 16, 24, 32 \n biCompression = 0 # only uncompressed images BI_RGB.\n biSizeImage = len(imgdata) + self.CalcRowSize( 1, bWidth ) * abs(bHeight) # calculate pixel array size\n biXPelsPerMeter = 0\n biYPelsPerMeter = 0\n biClrUsed = bColorCount\n biClrImportant = 0\n \n bmpinfoheader = pack('3I2H2I2i2I', biSize, biWidth, biHeight, biPlanes, biBitCount, biCompression, biSizeImage,\n biXPelsPerMeter, biYPelsPerMeter, biClrUsed, biClrImportant)\n return bmpinfoheader", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def reading_of(self, ac_pos):\n\n diff = np.subtract(self.pos, ac_pos)\n rng = norm(diff)\n brg = atan2(diff[1], diff[0])\n return rng, brg", "def test_merge_dim_header():\n hdr_in_1 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3],\n 'p2': [0.1, 0.2, 0.3]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 5, 4, 3)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4, 1, 2, 3],\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 5, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 6, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 
'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_2, hdr_in_1, 6, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': [5, 6, 7, 8, 1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 5, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 7, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]}}\n\n with pytest.raises(NIfTI_MRSIncompatible) as exc_info:\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 5, 4, 4)\n assert exc_info.type is NIfTI_MRSIncompatible\n assert exc_info.value.args[0] == \"Both files must have matching dimension headers apart from the one being merged.\"\\\n \" dim_7_header does not match.\"", "def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': 
{'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 
'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': 
{'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 
'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 
'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 
'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError (msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return", "def comp_alphas(self):\n Rbo = self.get_Rbo()\n\n # alpha_Tt is the angle of the tooth to have the correct top width\n alpha_Tt = 2 * float(arcsin(self.W3 / (2 * Rbo)))\n\n # alpha_0 + alpha_Tt = slot_ptich\n # Zs * (alpha_0+alpha_Tt) = 2 pi\n alpha_0 = 2 * pi / self.Zs - alpha_Tt\n\n if self.is_outwards():\n # alpha_Tb is the angle of the tooth to have the correct bottom width\n alpha_Tb = 2 * float(arcsin(self.W3 / (2 * (Rbo + self.H2))))\n else:\n alpha_Tb = 2 * float(arcsin(self.W3 / (2 * (Rbo - self.H2))))\n\n # Zs * (alpha_2+alpha_Tb) = 2 pi\n alpha_2 = 2 * pi / self.Zs - alpha_Tb\n\n return (alpha_0, alpha_2)", "def get_params(degrees, translate, scale_ranges, shears, img_size):\n angle = np.random.uniform(degrees[0], degrees[1])\n if translate is not None:\n max_dx = translate[0] * img_size[0]\n max_dy = translate[1] * img_size[1]\n translations = (np.round(np.random.uniform(-max_dx, max_dx)),\n np.round(np.random.uniform(-max_dy, max_dy)))\n else:\n translations = (0, 0)\n\n if 
scale_ranges is not None:\n scale = np.random.uniform(scale_ranges[0], scale_ranges[1])\n else:\n scale = 1.0\n\n if shears is not None:\n shear = np.random.uniform(shears[0], shears[1])\n else:\n shear = 0.0\n\n return angle, translations, scale, shear", "def format_affine_params(self):\n return self.format_param_pairs(self.get_affine_reg_params())", "def get_aperture_coeffs_in_header(head):\n\n coeffs = {}\n for key, value in head.items():\n exp = '^GAMSE TRACE CHANNEL [A-Z] APERTURE \\d+ COEFF \\d+$'\n if re.match(exp, key) is not None:\n g = key.split()\n channel = g[3]\n aperture = int(g[5])\n icoeff = int(g[7])\n if (channel, aperture) not in coeffs:\n coeffs[(channel, aperture)] = []\n if len(coeffs[(channel, aperture)]) == icoeff:\n coeffs[(channel, aperture)].append(value)\n return coeffs", "def alpha_star(self):\n return self.reciprocal_lattice_parameters[3]", "def get_offsets():\n \n offsets = dict()\n offsets['leiptr'] = [0.0, -0.005, 'left']\n offsets['gjoll'] = [0.15, -0.002, 'left']\n offsets['gd1'] = [0.15, -0.002, 'left']\n offsets['phlegethon'] = [0.0, 0.005, 'center']\n offsets['ylgr'] = [0.15, -0.002, 'left']\n offsets['wambelong'] = [0.0, -0.005, 'left']\n offsets['fimbulthul'] = [0.15, -0.002, 'left']\n offsets['ophiuchus'] = [0.0, -0.005, 'center']\n offsets['elqui'] = [0.15, -0.002, 'left']\n offsets['svol'] = [0.0, -0.004, 'right']\n offsets['ravi'] = [-0.1, 0.002, 'right']\n offsets['sylgr'] = [0.15, -0.002, 'left']\n offsets['jhelum'] = [0.15, -0.002, 'left']\n offsets['indus'] = [0.15, -0.002, 'left']\n offsets['phoenix'] = [0.0, -0.004, 'right']\n offsets['slidr'] = [0.15, 0.002, 'left']\n offsets['atlas'] = [0.1, -0.003, 'left']\n offsets['aliqa_uma'] = [0.15, -0.003, 'left']\n offsets['turbio'] = [-0.15, 0.00, 'right']\n offsets['turranburra'] = [-0.0, -0.003, 'right']\n offsets['fjorm'] = [0.0, -0.004, 'right']\n offsets['triangulum'] = [0.2, -0.005, 'center']\n offsets['willka_yaku'] = [-0.2, 0.005, 'center']\n \n return offsets", "def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F444W', grism='DFSR'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRCam'\n h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'DFSR':\n h['GRISM'] = 'DFSR', 'Spectral trace along X'\n else:\n h['GRISM'] = 'DFSC', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def aligned(self):\n return self.__aligned", "def getAlignment(self):\n # Code to complete - generated by traceback through matrix to generate aligned pairs\n \n # find the position of the max_value\n max_value = self.getMaxAlignmentScore()\n max_pos = tuple(numpy.argwhere(self.matrix == max_value)[-1])\n x_pos = 
max_pos[0]; y_pos = max_pos[1]\n\n # array that holds the tuples\n path = list()\n\n # now find the path to the 0\n \n while self.matrix[x_pos][y_pos] != 0:\n \n # if diagonal is a match take that as priority\n if self.string1[x_pos - 1] == self.string2[y_pos - 1]:\n path.append((x_pos - 1, y_pos - 1))\n x_pos -=1; y_pos -= 1\n continue\n\n # finds the best horizontal alignment\n bestX = 0; bestY = y_pos - 1\n for i in range(x_pos - 1):\n if self.matrix[i][y_pos - 1] >= self.matrix[bestX][bestY]:\n bestX = i\n \n # finds best vertical alignment\n bestX_vertical = x_pos - 1; bestY_vertical = 0\n for i in range(y_pos - 1):\n if self.matrix[x_pos - 1][i] >= self.matrix[bestX_vertical][bestY_vertical]:\n bestY_vertical = i\n \n # if diagonal not satisfied, see which is better\n # the horizontal of vertical alignment.\n if self.matrix[bestX][bestY] < self.matrix[bestX_vertical][bestY_vertical]:\n path.append((bestX_vertical, bestY_vertical))\n x_pos = bestX_vertical; y_pos = bestY_vertical\n else:\n path.append((bestX, bestY))\n x_pos = bestX; y_pos = bestY\n\n return path[::-1] # reversed because we want origin to highest element.", "def recoverParams(self):\n self.shape, self.rate = self.posterior[1] + 1, -self.posterior[0]", "def getGPEParams(self):\n outKeysScaleDouble = ['R', 'gamma_C', 'gamma_R', 'g_C', 'g_R', 'k',\n 'Pth']\n outKeysScaleSingle = outKeysScaleDouble + ['gamma_nl']\n outKeysScale = outKeysScaleSingle if self.singleComp else\\\n outKeysScaleDouble\n outKeys = ['charL', 'charT']\n out = {key: self.__dict__[key + '_scaled'] for key in outKeysScale}\n for key in outKeys:\n out[key] = self.__dict__[key]\n return out", "def get_hyper_params(**kwargs):\n hyper_params = {\n \"anchor_ratios\": [0.5, 1, 2],\n \"anchor_scales\": [16, 32, 64, 128, 256],\n \"stride\": 32,\n \"nms_topn\": 300,\n \"total_pos_bboxes\": 64,\n \"total_neg_bboxes\": 64,\n \"pooling_size\": (7, 7),\n }\n for key, value in kwargs.items():\n if key in hyper_params and value:\n hyper_params[key] = value\n #\n hyper_params[\"anchor_count\"] = len(hyper_params[\"anchor_ratios\"]) * len(hyper_params[\"anchor_scales\"])\n return hyper_params" ]
[ "0.6123718", "0.6082262", "0.5826465", "0.56437796", "0.5509969", "0.5504308", "0.5343178", "0.5336521", "0.5331882", "0.5322334", "0.5322334", "0.5299092", "0.524463", "0.524463", "0.524463", "0.5240063", "0.51976395", "0.5196707", "0.5196707", "0.5196707", "0.5195092", "0.5166009", "0.51351285", "0.51347023", "0.5131803", "0.5124764", "0.5084445", "0.5080971", "0.50682175", "0.5067194", "0.5045498", "0.50172824", "0.49977475", "0.49876788", "0.4985743", "0.49837187", "0.49785796", "0.49722883", "0.49559808", "0.49555746", "0.49457762", "0.49378598", "0.4933511", "0.49314126", "0.49311376", "0.49113062", "0.4904689", "0.49012306", "0.49010116", "0.49005497", "0.48971865", "0.48943654", "0.48760647", "0.48630366", "0.48599645", "0.48597765", "0.48549342", "0.48549342", "0.48543578", "0.4839616", "0.48380917", "0.48308674", "0.48306438", "0.48227853", "0.48227853", "0.48131683", "0.4808546", "0.48008394", "0.47965807", "0.47930124", "0.47930124", "0.47895476", "0.47867763", "0.47817737", "0.47747675", "0.4765292", "0.47642338", "0.47626224", "0.47617903", "0.47606614", "0.47582036", "0.47562027", "0.47551048", "0.47542807", "0.47542807", "0.47456163", "0.47414345", "0.47294176", "0.4726697", "0.47255293", "0.4724046", "0.47228923", "0.47207022", "0.4719172", "0.47188917", "0.47154704", "0.47126243", "0.47023922", "0.46939468", "0.46864286" ]
0.5867003
2
set 2D alignment parameters in the header: alpha, tx, ty, mirror, scale
def set_params2D(ima, p, xform = "xform.align2d"):
	t = Transform({"type":"2D","alpha":p[0],"tx":p[1],"ty":p[2],"mirror":p[3],"scale":p[4]})
	ima.set_attr(xform, t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def align(self):\n ...", "def parameters_ui(layout, params):\n\n r = layout.row()\n r.prop(params, \"rotation_axis\")\n\n if 'auto' not in params.rotation_axis.lower():\n r = layout.row()\n text = \"Auto align Foot\"\n r.prop(params, \"auto_align_extremity\", text=text)\n\n r = layout.row()\n r.prop(params, \"segments\")\n\n r = layout.row()\n r.prop(params, \"bbones\")\n\n bone_layers = bpy.context.active_pose_bone.bone.layers[:]\n\n for layer in ['fk', 'tweak']:\n r = layout.row()\n r.prop(params, layer + \"_extra_layers\")\n r.active = params.tweak_extra_layers\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(16, 24):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8, 16):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(24, 32):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def text_alignment(x, y):\n if x == 0:\n ha = \"center\"\n elif x > 0:\n ha = \"left\"\n else:\n ha = \"right\"\n if y == 0:\n va = \"center\"\n elif y > 0:\n va = \"bottom\"\n else:\n va = \"top\"\n\n return ha, va", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def updateHeaderComputedValues( self ):\n self.nAvgBytesPerSec = int( self.nNbrChannel*self.nSamplingRate*self.nNbrBitsPerSample/8 )\n self.nSizeBlockAlign = int( 
self.nNbrChannel*self.nNbrBitsPerSample/8 )\n self.dataType = Wav.getDataType( self.nNbrBitsPerSample )", "def align_attach(*args):\n # check selection, curves, etc\n sel = cmds.ls(sl=True)\n crv1 = \"\"\n crv2 = \"\"\n\n if sel and len(sel)== 2:\n check1 = rig.type_check(sel[0], \"nurbsCurve\")\n check2 = rig.type_check(sel[1], \"nurbsCurve\")\n if not check1 and check2:\n cmds.warning(\"you must select two curves!\")\n return\n else:\n cmds.warning(\"you must select two curves!\")\n return\t\t\n\n crv1, crv2 = sel[0], sel[1]\n newCrv = cmds.alignCurve(crv1, crv2, ch=False, replaceOriginal=False, attach=True, keepMultipleKnots=True, positionalContinuityType=2, tangentContinuity=False, curvatureContinuity=False, name = \"{0}_ATT\".format(crv1))\n cmds.setAttr(\"{0}.v\".format(crv1), 0)\n cmds.setAttr(\"{0}.v\".format(crv2), 0)", "def __prepare_dh_params(self):\n self.alpha = symbols('alpha0:' + str(self.joint_count))\n self.a = symbols('a0:' + str(self.joint_count))\n self.q = symbols('q1:' + str(self.joint_count + 1))\n self.d = symbols('d1:' + str(self.joint_count + 1))", "def format_alignment(self, alignment):\n raise NotImplementedError(\"This method should be implemented\")\n ###################################################\n # You MUST implement this method in the subclass. #\n ###################################################", "def set_params(self):\n max_margin = int(self.alpha) + 1\n self.sample_params['add'] = [0, max_margin, max_margin]", "def set_alignment(\n self,\n horizontal: constants.HorizontalAlignmentStr | None = None,\n vertical: constants.VerticalAlignmentStr | None = None,\n ):\n match horizontal, vertical:\n case None, None:\n return self\n case None, _:\n flag = constants.V_ALIGNMENT[vertical]\n case _, None:\n flag = constants.H_ALIGNMENT[horizontal]\n case _, _:\n flag = constants.V_ALIGNMENT[vertical] | constants.H_ALIGNMENT[horizontal]\n self.setAlignment(flag)\n return self", "def alignCtx(*args, align: bool=True, anchorFirstObject: bool=False, distribute: bool=True,\n exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name: AnyStr=\"\",\n showAlignTouch: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def fiber_alignment(config, ind=0):\n files = sorted(\n glob.glob(config['raw_dir'] + '/radiance/{}/data/data_*.txt'.format(\n config['date'])))\n\n # load data from txt file\n txt = np.genfromtxt(files[ind], delimiter='', skip_header=11)\n\n align = add_align()\n\n # extract pixels of alignment\n pixels = align['pixel'] + config['channel_pixel_adj']\n\n plt.figure(figsize=(12, 9), dpi=300)\n\n ax1 = plt.subplot2grid((2, 4), (0, 0), colspan=4)\n 
ax1.plot(txt[500, :], '-*')\n ax1.axis([0, 1060, 0, txt[500, :].max() + 20])\n for xc in pixels:\n plt.axvline(x=xc, color='r')\n plt.xlabel('pixels')\n plt.ylabel('counts')\n plt.title('Channel alignment')\n \n ax2 = plt.subplot2grid((2, 4), (1, 0), colspan=2)\n # First section\n ax2.plot(txt[500, :], '-*')\n ax2.axis([0, 200, 0, txt[500, :].max() + 20])\n for xc in pixels:\n plt.axvline(x=xc, color='r')\n plt.xlabel('pixels')\n plt.ylabel('counts')\n plt.title('Initial section')\n\n ax3 = plt.subplot2grid((2, 4), (1, 2), colspan=2)\n # final section\n ax3.plot(txt[500, :], '-*')\n ax3.axis([800, 1060, 0, txt[500, :].max() + 20])\n for xc in pixels:\n plt.axvline(x=xc, color='r')\n plt.xlabel('pixels')\n plt.ylabel('counts')\n plt.title('Final section')\n\n plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.show()", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def __init__(self, size, stride, ratios=None, scales=None, *args, **kwargs):\n super(Anchors, self).__init__()\n # 
strides and sizes align with FPN feature outputs (p2-pn)\n self.size = size\n self.stride = stride\n # ratios and scales applied to all feature levels from FPN output\n if not ratios:\n ratios = [1] #used in RetinaFace since faces are typically square-like\n #ratios = [0.5, 1, 2]\n self.ratios = ratios\n \n if not scales:\n scales = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\n self.scales = scales\n self.n_anchors = len(ratios) * len(scales)\n self.anchor_utils = AnchorUtils(ratios=self.ratios, scales=self.scales)", "def _rotate(self):\n \r\n if self.clr == 1: # (default rotation) \r\n # o o o o \r\n # o x x o x o o x\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0]] #\r\n elif self.clr == 2:\r\n # o o o o \r\n # o x o x x o x o\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0], [-1, 0, 0, 1]] #\r\n _rowOffsets = [[-1, 0, 0, 1], [-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0]] #\n \r\n elif self.clr == 3: # \r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\n \r\n _colOffsets = [[-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0], [ 1, 1, 0,-1]] #\r\n _rowOffsets = [[ 1, 1, 0,-1], [-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0]] #\n \r\n elif self.clr == 4:\r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\r\n _colOffsets = [[-1, 0, 0, 0], [1, 1, 0, -1], [1, 0, 0,0], [-1, -1, 0,1]]\n _rowOffsets = [[-1,-1, 0, 1], [-1,0, 0, 0], [1,1, 0,-1], [1,0, 0, 0]]\n \r\n elif self.clr == 5: # o o\r\n # o x \r\n # x o x o o o o o x o\r\n # o o \r\n _colOffsets = [[ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0], [-2,-1, 0, 1]] #\r\n _rowOffsets = [[-2,-1, 0, 1], [ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0]] #\r\n elif self.clr == 6: #\r\n # o o o \r\n # o x o x o x o o x o\r\n # o o o \r\n _colOffsets = [[ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0]] #\r\n elif self.clr == 7: # \r\n # o o o o o o o o\r\n # o x o x o x o x\r\n # \r\n _colOffsets = [[-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0]] #@@\r\n _rowOffsets = [[ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1]] #@@\n \r\n self._colOffsets = _colOffsets[self._rot] #@@\r\n self._rowOffsets = _rowOffsets[self._rot] #@@\r\n self._update() #@@\r", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def update_orth_rgba(self):\n self.update_sagital_rgba()\n self.update_coronal_rgba()\n self.update_axial_rgba()", "def __init__(self, angle = 0, center = (0, 0)):\n\n self.angle = angle\n self.center = center\n self.size = (2 * 194 + 3, 185)", "def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })", "def __init__(self, _sequence, _structure_offset,\n _invert_y, _invert_init_angle, _reverse_actuation,\n _bot_color, _top_color,\n _name,\n _r1, _r2,\n _theta1, _theta2,\n _leg_length):\n\n # Define sequence\n self.sequence = 
_sequence\n self.structure_offset = _structure_offset\n self.invert_y = _invert_y\n self.bot_color = _bot_color\n self.top_color = _top_color\n self.name = _name\n self.invert_init_angle = _invert_init_angle\n if _reverse_actuation:\n self.invert_init_angle = not self.invert_init_angle\n\n # Create first block\n _d_bot = np.arccos(_theta1) * _r1 / 2\n _d_top = np.arccos(_theta2) * _r2 / 2\n _d_mid = 1 / 100\n _w = 5.5 / 100\n _h = 5.5 / 100\n\n _center = Coordinate(x=0, y=_d_bot - (_h / 2))\n self.block_bot = Block(\n _width=_w,\n _height=_h,\n _center=_center,\n _anchor_d=_d_bot,\n _color=self.bot_color,\n _type='bottom'\n )\n\n # Create mid block\n _center = Coordinate(x=0, y=_r1 - _d_mid + (_h / 2))\n self.block_mid = Block(\n _width=_w,\n _height=_h,\n _center=_center,\n _anchor_d=_d_mid,\n _color=Utils.black,\n _type='middle'\n )\n\n # Create top block\n _center = Coordinate(x=0, y=self.block_mid.get_anchor(type=\"t\").y + _r2 - _d_top + (_h/2))\n self.block_top = Block(\n _width=_w,\n _height=_h,\n _center=_center,\n _anchor_d=_d_top,\n _color=self.top_color,\n _type='top'\n )\n\n # Create the bars_bot\n self.bars_bot = Arm(\n self.block_bot.get_anchor(type=\"t\"),\n self.block_mid.get_anchor(type=\"b\"),\n _r1,\n self.block_bot.get_anchor_distance()\n )\n\n # Create the bars_top\n self.bars_top = Arm(\n self.block_mid.get_anchor(type='t'),\n self.block_top.get_anchor(type='b'),\n _r2,\n self.block_mid.get_anchor_distance()\n )\n\n # Create the spring_bot\n self.spring_bot = Spring(\n _P=Coordinate(x=0, y=self.block_bot.get_anchor(type='t').y),\n _Q=Coordinate(x=0, y=self.block_mid.get_anchor(type='b').y)\n )\n\n # Create the spring_top\n self.spring_top = Spring(\n _P=Coordinate(x=0, y=self.block_mid.get_anchor(type='t').y),\n _Q=Coordinate(x=0, y=self.block_top.get_anchor(type='b').y)\n )\n\n # Compute Theta_s - limits of the angle for the bar.\n self.theta_s_bot = np.arccos(2 * self.block_bot.anchor_d / self.bars_bot.length)\n self.theta_s_top = np.arccos(2 * self.block_mid.anchor_d / self.bars_top.length)\n\n self.theta_i_bot = 0\n self.theta_i_top = 0\n\n self.leg_length = _leg_length\n\n self.A = []\n self.B = []\n self.C = []\n\n self.ground_distance = 0.0\n\n self.init_position()", "def set_alpha(self, alpha=1.0):\r\n self.unif[17] = alpha", "def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)", "def setup_layout_constants(self):\n # determines the spacing between the edge and nmos (rail to active\n # metal or poly_to_poly spacing)\n half_gate_to_gate = 0.5 * (drc[\"poly_to_poly\"] - drc[\"minwidth_metal1\"])\n edge_to_nmos = max(drc[\"metal1_to_metal1\"] - self.nmos.active_contact_positions[0].y,\n half_gate_to_gate - self.nmos.poly_positions[0].y)\n\n # determine the position of the first transistor from the left\n self.nmos_position1 = vector(0,\n 0.5 * drc[\"minwidth_metal1\"] + edge_to_nmos)\n offset = self.nmos_position1 + vector(0,self.nmos.height)\n\n x = vector(self.nmos.active_width - self.nmos.active_contact.width, 0)\n self.nmos_position2 = x + self.nmos_position1.scale(0,1)\n\n # determines the spacing between the edge and pmos\n edge_to_pmos = max(drc[\"metal1_to_metal1\"] - self.pmos.active_contact_positions[0].y,\n half_gate_to_gate - self.pmos.poly_positions[0].y)\n self.pmos_position1 = vector(0,\n self.height - 0.5 * drc[\"minwidth_metal1\"]\n - edge_to_pmos - self.pmos.height)\n self.pmos_position2 = 
self.pmos_position1 + vector(self.pmos.width,0)\n\n self.well_width = max(self.pmos_position2.x + self.pmos.active_position.x\n + self.pmos.active_width\n + drc[\"active_to_body_active\"] + self.nwell_contact.width \n + drc[\"well_enclosure_active\"],\n self.nmos_position2.x + self.nmos.active_position.x \n + self.nmos.active_width \n + drc[\"active_to_body_active\"] + drc[\"well_enclosure_active\"])\n self.width = self.well_width", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def load_aligned(self, image, size=256, padding=48, align_eyes=False):\n self.aligned[\"size\"] = size\n self.aligned[\"padding\"] = padding\n self.aligned[\"align_eyes\"] = align_eyes\n self.aligned[\"matrix\"] = get_align_mat(self, size, align_eyes)\n self.aligned[\"face\"] = AlignerExtract().transform(\n image,\n self.aligned[\"matrix\"],\n size,\n padding)", "def _secondary_beam(self, hdr):\n # Called ApSecondaryNano in OpenMIMS\n d = {}\n tmp = unpack(self._bo + 'd 42i 2d', hdr.read(192))\n d['E0W'], d['ES'] = tmp[:2]\n d['ES widths'] = tmp[2:12]\n d['ES heights'] = tuple(tmp[12:22])\n d['AS'] = tmp[22]\n d['AS widths'] = tuple(tmp[23:33])\n d['AS heights'] = tuple(tmp[33:43])\n d['EnS'], d['EnS width'] = tmp[43:]\n return d", "def _create_hdr_obj(self, pix_len, pix_scale):\n hdr = astropy.io.fits.Header()\n hdr['NAXIS'] = 2\n hdr['NAXIS1'] = pix_len\n hdr['NAXIS2'] = pix_len\n hdr['CTYPE1'] = 'RA---TAN'\n hdr['CRVAL1'] = float(self.ra_ctr)\n hdr['CRPIX1'] = (pix_len / 2.) * 1.\n hdr['CDELT1'] = -1.0 * pix_scale\n hdr['CTYPE2'] = 'DEC--TAN'\n hdr['CRVAL2'] = float(self.dec_ctr)\n hdr['CRPIX2'] = (pix_len / 2.) 
* 1.\n hdr['CDELT2'] = pix_scale\n hdr['EQUINOX'] = 2000\n return hdr", "def resetAlignmentCenter(self):\n cent = self.TiltSeries_._TiltAlignmentParas.cent\n imdimX = self.TiltSeries_._imdimX\n imdimY = self.TiltSeries_._imdimY\n print(imdimX, imdimY)\n if cent[0] != imdimX//2+1 or cent[1] != imdimY//2+1:\n #rint \"Centers do not match: cent=\"+str(cent)+\", imdim=\"+str(imdim)\n self.TiltSeries_._TiltAlignmentParas.cent = [imdimX//2+1, imdimY//2+1]", "def set_trans(self, head_mri_trans):\n x, y, z = -self.mri_origin[0]\n mri_tgt_trans = translation(x, y, z)\n head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)\n\n x, y, z = self.hsp.nasion[0]\n src_hsp_trans = translation(x, y, z)\n src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)\n\n rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])\n x, y, z = src_tgt_trans[:3, 3]\n\n self.rot_x = rot_x\n self.rot_y = rot_y\n self.rot_z = rot_z\n self.trans_x = x\n self.trans_y = y\n self.trans_z = z", "def __init_af(self,i,h1,h2):\n self.params['W'+i]=np.random.randn(h1,h2)*self.weight_scale\n self.params['b'+i]=np.zeros(h2)\n if self.use_batchnorm:\n self.params['gamma'+i]=np.ones(h2)\n self.params['beta'+i]=np.zeros(h2)", "def _generate_comp_attribs(self, center_lines: List[ndarray],\n text_mask: ndarray, center_region_mask: ndarray,\n top_height_map: ndarray,\n bot_height_map: ndarray, sin_map: ndarray,\n cos_map: ndarray) -> ndarray:\n\n assert isinstance(center_lines, list)\n assert (text_mask.shape == center_region_mask.shape ==\n top_height_map.shape == bot_height_map.shape == sin_map.shape\n == cos_map.shape)\n\n center_lines_mask = np.zeros_like(center_region_mask)\n cv2.polylines(center_lines_mask, center_lines, 0, 1, 1)\n center_lines_mask = center_lines_mask * center_region_mask\n comp_centers = np.argwhere(center_lines_mask > 0)\n\n y = comp_centers[:, 0]\n x = comp_centers[:, 1]\n\n top_height = top_height_map[y, x].reshape(\n (-1, 1)) * self.comp_shrink_ratio\n bot_height = bot_height_map[y, x].reshape(\n (-1, 1)) * self.comp_shrink_ratio\n sin = sin_map[y, x].reshape((-1, 1))\n cos = cos_map[y, x].reshape((-1, 1))\n\n top_mid_points = comp_centers + np.hstack(\n [top_height * sin, top_height * cos])\n bot_mid_points = comp_centers - np.hstack(\n [bot_height * sin, bot_height * cos])\n\n width = (top_height + bot_height) * self.comp_w_h_ratio\n width = np.clip(width, self.min_width, self.max_width)\n r = width / 2\n\n tl = top_mid_points[:, ::-1] - np.hstack([-r * sin, r * cos])\n tr = top_mid_points[:, ::-1] + np.hstack([-r * sin, r * cos])\n br = bot_mid_points[:, ::-1] + np.hstack([-r * sin, r * cos])\n bl = bot_mid_points[:, ::-1] - np.hstack([-r * sin, r * cos])\n text_comps = np.hstack([tl, tr, br, bl]).astype(np.float32)\n\n score = np.ones((text_comps.shape[0], 1), dtype=np.float32)\n text_comps = np.hstack([text_comps, score])\n if la_nms is None:\n raise ImportError('lanms-neo is not installed, '\n 'please run \"pip install lanms-neo==1.0.2\".')\n text_comps = la_nms(text_comps, self.text_comp_nms_thr)\n\n if text_comps.shape[0] >= 1:\n img_h, img_w = center_region_mask.shape\n text_comps[:, 0:8:2] = np.clip(text_comps[:, 0:8:2], 0, img_w - 1)\n text_comps[:, 1:8:2] = np.clip(text_comps[:, 1:8:2], 0, img_h - 1)\n\n comp_centers = np.mean(\n text_comps[:, 0:8].reshape((-1, 4, 2)),\n axis=1).astype(np.int32)\n x = comp_centers[:, 0]\n y = comp_centers[:, 1]\n\n height = (top_height_map[y, x] + bot_height_map[y, x]).reshape(\n (-1, 1))\n width = np.clip(height * self.comp_w_h_ratio, self.min_width,\n 
self.max_width)\n\n cos = cos_map[y, x].reshape((-1, 1))\n sin = sin_map[y, x].reshape((-1, 1))\n\n _, comp_label_mask = cv2.connectedComponents(\n center_region_mask, connectivity=8)\n comp_labels = comp_label_mask[y, x].reshape(\n (-1, 1)).astype(np.float32)\n\n x = x.reshape((-1, 1)).astype(np.float32)\n y = y.reshape((-1, 1)).astype(np.float32)\n comp_attribs = np.hstack(\n [x, y, height, width, cos, sin, comp_labels])\n comp_attribs = self._jitter_comp_attribs(comp_attribs,\n self.jitter_level)\n\n if comp_attribs.shape[0] < self.num_min_comps:\n num_rand_comps = self.num_min_comps - comp_attribs.shape[0]\n rand_comp_attribs = self._generate_rand_comp_attribs(\n num_rand_comps, 1 - text_mask)\n comp_attribs = np.vstack([comp_attribs, rand_comp_attribs])\n else:\n comp_attribs = self._generate_rand_comp_attribs(\n self.num_min_comps, 1 - text_mask)\n\n num_comps = (\n np.ones((comp_attribs.shape[0], 1), dtype=np.float32) *\n comp_attribs.shape[0])\n comp_attribs = np.hstack([num_comps, comp_attribs])\n\n if comp_attribs.shape[0] > self.num_max_comps:\n comp_attribs = comp_attribs[:self.num_max_comps, :]\n comp_attribs[:, 0] = self.num_max_comps\n\n pad_comp_attribs = np.zeros(\n (self.num_max_comps, comp_attribs.shape[1]), dtype=np.float32)\n pad_comp_attribs[:comp_attribs.shape[0], :] = comp_attribs\n\n return pad_comp_attribs", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def __init__( self, x1, y1, x2 = 1.1, y2 = 1.1, halign = \"fixed\", valign = \"fixed\", font=42, textSize = None ):\n if x2 == 1.1 and halign == \"fixed\": halign = \"left\"\n if y2 == 1.1 and valign == \"fixed\": valign = \"top\"\n self.halign = halign\n self.valign = valign\n ROOT.TLegend.__init__( self, x1, y1, x2, y2 )\n self.SetTextFont( font )\n if textSize: self.SetTextSize( textSize )", "def __init__(self,\n mass,\n width_1, width_2,\n x0_1, x0_2,\n v0_1=0, v0_2=0,\n h=0.01):\n super().__init__(mass, mass,\n width_1, width_2,\n x0_1, x0_2,\n v0_1, v0_2, h)", "def set_homog_trans_mtx(x: float, y: float, z: float, mtx: numpy.ndarray):\n mtx[0][3] = x\n mtx[1][3] = y\n mtx[2][3] = z", "def __init__(self, *args, **kwargs):\n super(AscatL2Image, self).__init__(*args, **kwargs)", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def __init__(self, encoder_size, decoder_size, label_size):\n super(BilinearAttention, self).__init__()\n self.W = nn.Parameter(torch.zeros(label_size, decoder_size, encoder_size))\n self.u = nn.Parameter(torch.zeros(label_size, encoder_size))\n self.v = nn.Parameter(torch.zeros(label_size, decoder_size))\n self.b = nn.Parameter(torch.zeros(label_size))\n \n nn.init.xavier_uniform_(self.W)\n nn.init.xavier_uniform_(self.u)\n nn.init.xavier_uniform_(self.v)", "def setPos(self, x, y, anchor='ll'):\n 
self.transform.setPos(glm.vec3(x, y, 0))\n if anchor == 'ul':\n offx = 0\n offy = - self.font.table['ascent']\n elif anchor == 'uc':\n offx = - self._labelWidth / 2\n offy = - self.font.table['ascent']\n elif anchor == 'ur':\n offx = - self._labelWidth\n offy = - self.font.table['ascent']\n elif anchor == 'cl':\n offx = 0\n offy = self._labelHeight / 2 - self.font.table['ascent']\n elif anchor == 'cc':\n offx = - self._labelWidth / 2\n offy = self._labelHeight / 2 - self.font.table['ascent']\n elif anchor == 'cr':\n offx = - self._labelWidth\n offy = self._labelHeight / 2 - self.font.table['ascent']\n elif anchor == 'll':\n offx = 0\n offy = self._labelHeight - self.font.table['ascent']\n elif anchor == 'lc':\n offx = - self._labelWidth / 2\n offy = self._labelHeight - self.font.table['ascent']\n elif anchor == 'lr':\n offx = - self._labelWidth\n offy = self._labelHeight - self.font.table['ascent']\n else:\n raise SystemExit(f\"Unimplemented anchor '{anchor}'\")\n self.model.setPos(glm.vec3(offx, offy, 0))", "def set_dims_in_hdr(hdr, startx, starty, cols, rows):\n hdr['startX'] = (startx, 'Starting CCD pixel column')\n hdr['endX'] = (startx + cols, 'Ending CCD pixel column+1')\n hdr['startY'] = (starty, 'Starting CCD pixel row')\n hdr['endY'] = (starty + rows, 'Ending CCD pixel row+1')", "def add_align():\n try:\n alignment = pd.read_table(cwd + '/config_files/Alignment_Lab_UV_20120822.dat',\n sep='\\s+',\n names=['Channel Number', 'Azimuth', 'Zenith', 'pixel', 'pixel2',\n 'pixel3'], skiprows=1)\n except ValueError:\n print(\"Add alignment file in folder '~./config_files/'. The alignment file\\n\"\n \"must beginning with 'Alignment_' \")\n\n return alignment", "def header_style(self):\n ...", "def __init__(self):\n self.rot_axis = 1", "def wfirst_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, naxis=(4096,4096)):\n #naxis = 2048, 2048\n crpix = naxis[0]/2., naxis[0]/2.\n \n cd = np.array([[ -0.11, 0], [0, 0.11]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n h['BACKGR'] = 0.17+0.49, 'Total, e/s SDT Report A-1'\n h['FILTER'] = 'GRS', 'WFIRST grism'\n h['INSTRUME'] = 'WFIRST'\n h['READN'] = 17, 'SDT report Table 3-3' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def _augment_format(self, header):\n header.add_format_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"INHERITANCE\"),\n (\"Number\", \".\"),\n (\"Type\", \"String\"),\n (\"Description\", \"Compatible modes of inheritance\"),\n ]\n )\n )\n return header", "def create_tangent_angles_equal(self):\n\n self.text_mirror = TextMobject(r\"Specular reflection\")\n self.text_mirror.move_to(4.0 * RIGHT + 2.0 * UP)\n\n self.tex_derive_ti_tr = TexMobject(r\"\\theta_{i}\", r\"=\", r\"\\theta_{r}\", r\"=\", r\"\\theta_{0}\")\n self.tex_derive_ti_tr[0].set_color(self.tex_theta_in_color)\n self.tex_derive_ti_tr[2].set_color(self.tex_theta_ref_color)\n self.tex_derive_ti_tr[4].set_color(RED)\n self.tex_derive_ti_tr.move_to(4.0 * RIGHT + 1.0 * UP)\n\n self.tex_derive_tan_tin_tan_tr = TexMobject(r\"90^{\\circ}\", r\"-\", r\"\\theta_{i}\",\n r\"=\",\n r\"90^{\\circ}\", r\"-\", r\"\\theta_{r}\",\n r\"=\", r\"\\theta_{0}'\")\n for i in 
range(0,3):\n self.tex_derive_tan_tin_tan_tr[ i].set_color(self.tex_theta_in_color)\n self.tex_derive_tan_tin_tan_tr[4+i].set_color(self.tex_theta_ref_color)\n self.tex_derive_tan_tin_tan_tr[8].set_color(RED)\n self.tex_derive_tan_tin_tan_tr.move_to(4.0 * RIGHT + 0.0 * UP)\n\n self.theta_0 = TexMobject(r\"\\theta_{0}\"). set_color(RED)\n self.theta_0_d = TexMobject(r\"\\theta_{0}'\").set_color(RED)", "def test_merge_dim_header():\n hdr_in_1 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3],\n 'p2': [0.1, 0.2, 0.3]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 5, 4, 3)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4, 1, 2, 3],\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 5, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 6, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_2, hdr_in_1, 6, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 
'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': [5, 6, 7, 8, 1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 5, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 7, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]}}\n\n with pytest.raises(NIfTI_MRSIncompatible) as exc_info:\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 5, 4, 4)\n assert exc_info.type is NIfTI_MRSIncompatible\n assert exc_info.value.args[0] == \"Both files must have matching dimension headers apart from the one being merged.\"\\\n \" dim_7_header does not match.\"", "def setup_normalyzer_header(design_matrix: DF, annot_cols: List[str], normalyzer_vals:DF) -> DF:\n\n # Get numbers set up as list of stringified numbers ('-1', '0', '0', '1', '1')\n nbr_annot_cols = len(annot_cols)\n sample_head = [-1] + [0] * (nbr_annot_cols - 1) + list(design_matrix['biorepgroup'])\n sample_head_str = [str(e) for e in sample_head]\n\n # Get text-information about each column\n label_row = list(normalyzer_vals.columns)[:nbr_annot_cols] + list(design_matrix['name'])\n\n headers = pd.DataFrame([sample_head_str, label_row])\n headers.columns = normalyzer_vals.columns\n\n return headers", "def __setattr__(self, item, value):\n if item in ('header', 'lines', 'mag', 'z', 'cubes', 'images',\n 'spectra', 'tables', '_logger', '_filename',\n '_default_size', 'default_size'):\n super(Source, self).__setattr__(item, value)\n else:\n self.header[item] = value", "def setMagnificationsInTiltSeries(self, TiltSeries_):\n kk = 0\n for proj in TiltSeries_._ProjectionList._list:\n proj.setAlignmentMagnification(self._alignmentMagnifications[kk])\n kk = kk + 1", "def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F444W', grism='DFSR'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # 
http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRCam'\n h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'DFSR':\n h['GRISM'] = 'DFSR', 'Spectral trace along X'\n else:\n h['GRISM'] = 'DFSC', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0", "def __init__(self):\n super(StandardArrowHead, self).__init__()\n self._length = 10\n self._width = 0.4", "def renderer_settings(self):\n return {'width': self.width,\n 'height': self.height,\n 'model_matrix': np.eye(4, dtype=np.float32),\n 'view_matrix': self.modelview_matrix,\n 'projection_matrix': self.projection_matrix}", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.poly_contact_offset = vector(0.5*contact.poly.width,0.5*contact.poly.height)\n\n # M1/M2 routing pitch is based on contacted pitch\n self.m1_pitch = max(contact.m1m2.width,contact.m1m2.height) + max(self.m1_space,self.m2_space)\n self.m2_pitch = max(contact.m2m3.width,contact.m2m3.height) + max(self.m2_space,self.m3_space)\n \n # This corrects the offset pitch difference between M2 and M1\n self.offset_fix = vector(0.5*(self.m2_width-self.m1_width),0)\n\n # delay chain will be rotated 90, so move it over a width\n # we move it up a inv height just for some routing room\n self.rbl_inv_offset = vector(self.delay_chain.height, self.inv.width)\n # access TX goes right on top of inverter, leave space for an inverter which is\n # about the same as a TX. 
We'll need to add rails though.\n self.access_tx_offset = vector(1.25*self.inv.height,self.rbl_inv_offset.y) + vector(0,2.5*self.inv.width)\n self.delay_chain_offset = self.rbl_inv_offset + vector(0,4*self.inv.width)\n\n # Replica bitline and such are not rotated, but they must be placed far enough\n # away from the delay chain/inverter with space for three M2 tracks\n self.bitcell_offset = self.rbl_inv_offset + vector(2*self.m2_pitch, 0) + vector(0, self.bitcell.height + self.inv.width)\n\n self.rbl_offset = self.bitcell_offset\n\n \n self.height = self.rbl_offset.y + self.rbl.height + self.m2_pitch\n self.width = self.rbl_offset.x + self.bitcell.width", "def test_default_parameters(self):\n\n assert self.test_shape.rotation_angle == 360\n assert self.test_shape.extrude_both", "def prob_t_a_given_s(self, alignment_info):\n ...", "def align(self):\n flag=0\n input=None\n level=None\n board=None\n ainps={'L0':[],'L1':[],'L2':[],'H0':[]} \n for i in self.inputs:\n if(i.inputnumber.var.get() == 1):\n if i.inpnumall == rareradio:\n input=i.inpnum\n level=i.level\n board=i.board\n print 'Rare chosen:',level,' ',input\n ainps[i.level].append(i.inpnum)\n flag=flag+1\n #print 'ainps:',ainps \n if flag < 2 :\n print \"Align: less then 2 inputs chosen. \" \n return\n if input==None:\n cmd=\"setRareFlag(0,0,0)\"\n else:\n mode='0'\n if level == 'H0': mode = '1'\n cmd=\"setRareFlag(\"+board+','+input+','+mode+\")\"\n print \"seting rare: \",cmd\n output=self.vb.io.execute(cmd,log=\"yes\",applout=\"<>\") \n self.align=Corel(self.vb,ainps)\n self.align.croscor()", "def define_text(cls, position, text, scale=1, rotation=0, text_width=1):\n \n position = np.array(position)\n text_obj = cls()\n text_obj.coord = []\n sep = 0.2*scale\n posref=np.array([0.0,0.0])\n \n #complete text length\n #textlen = 0.5*sep*(len(text)-1)+0.5*scale * np.sum([np.array(Feature.hershey_table[ord(text[x])]['width']) for x in range(len(text))])\n for x in range(len(text)):\n code = ord(text[x])\n letter = Feature.hershey_table[code]\n if x>0:\n posref[0]=posref[0]+ scale*np.array(letter['width'])/2+sep\n #posref[0] = posref[0]-textlen\n #position[0] = position[0] + scale*np.array(letter['width'])/2+sep\n for k in letter['coord']:\n if len(k)>0:\n k = np.array(k)\n coord = [scale*m+posref-np.array([0,0.5*scale]) for m in k]\n text_obj.coord.append(np.squeeze(coord))\n posref[0]=posref[0]+ scale*np.array(letter['width'])/2+sep\n \n #align number\n posref[0]=posref[0]- scale*np.array(letter['width'])/2-sep\n for x in range(len(text_obj.coord)):\n text_obj.coord[x] = np.array([z- np.array([posref[0]/2,0]) for z in text_obj.coord[x]])\n \n #Rotate the number by alpha\n if rotation !=0:\n\n alpha = rotation\n R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])\n\n for i in range(len(text_obj.coord)):\n text_obj.coord[i] = np.squeeze([np.dot([x],R) \n for x in text_obj.coord[i]])\n \n #move the number to position pos\n for x in range(len(text_obj.coord)):\n text_obj.coord[x] = np.array([z+ position for z in text_obj.coord[x]])\n \n #make thick numbers (currently unused)\n '''for n in range(len(text_obj.coord)):\n x = text_obj.coord[n][:,0]\n y = text_obj.coord[n][:,1]\n x2 = np.array([[x[i],(2/3*x[i]+1/3*x[i+1]),(1/3*x[i]+2/3*x[i+1])] for i in range(x.shape[0]-1)])\n y2 = np.array([[y[i],(2/3*y[i]+1/3*y[i+1]),(1/3*y[i]+2/3*y[i+1])] for i in range(y.shape[0]-1)])\n x2 = np.concatenate(x2)\n x2 = np.append(x2,x[-1])\n y2 = np.concatenate(y2)\n y2 = np.append(y2,y[-1])\n x=x2\n y=y2\n\n #nt = np.linspace(0, 1, 
40)\n #t = np.zeros(x.shape)\n #t[1:] = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)\n #t = np.cumsum(t)\n #t /= t[-1]\n #x2 = scipy.interpolate.spline(t, x, nt,order=2)\n #y2 = scipy.interpolate.spline(t, y, nt,order=2)\n newcoord = np.stack((x2,y2),axis=1)\n \n csx = CubicSpline(np.linspace(0, 1, x.shape[0]), x,bc_type = 'clamped')\n csy = CubicSpline(np.linspace(0, 1, y.shape[0]), y,bc_type = 'clamped')\n s\n xs = np.linspace(0, 1,1000)\n newcoord = np.stack((csx(xs),csy(xs)),axis=1)\n \n text_obj.coord[n]=Feature.define_tube(newcoord,0,100).coord[0]'''\n #set object as text type\n text_obj.open = True\n text_obj.text_width = text_width\n return text_obj", "def set_height(self,c, h):\r\n self.h = h\r\n self.T1 = [[-self.R * np.sqrt(3) / (2*self.h), self.R / (2*self.h), 1],[0,-self.R/(self.h),1],[self.R * np.sqrt(3) / (2*self.h), self.R / (2*self.h), 1]]\r\n return self.h", "def _getAttributes(self):\n self._params = {}\n if self.interp is not None:\n # Initialize interpolation function :\n self['x'] = np.arange(0, self.pixels, 1)\n self['y'] = np.arange(0, self.pixels, 1)\n # Define newaxis :\n self['xnew'] = np.arange(0, self.pixels, self.interp)\n self['ynew'] = np.arange(0, self.pixels, self.interp)\n self['csize'] = len(self['xnew'])\n else:\n self['csize'] = self.pixels\n # Variables :\n l = int(self['csize'] / 2)\n self['l'] = l\n y, x = np.ogrid[-l:l, -l:l]\n disc = x**2 + y**2\n self['mask'] = disc < l**2\n self['nmask'] = np.invert(self['mask'])\n # self['image'] = np.tile(self.bgcolor[np.newaxis, ...], (2*l, 2*l, 1))", "def set_symmetry(self):\n if self.symmetry == 'cubic':\n self.c = self.b = self.a\n self.alpha = self.beta = self.gamma = 90.0\n elif self.symmetry == 'tetragonal':\n self.b = self.a\n self.alpha = self.beta = self.gamma = 90.0\n elif self.symmetry == 'orthorhombic':\n self.alpha = self.beta = self.gamma = 90.0\n elif self.symmetry == 'hexagonal':\n self.b = self.a\n self.alpha = self.beta = 90.0\n self.gamma = 120.0\n elif self.symmetry == 'monoclinic':\n self.alpha = self.gamma = 90.0", "def setParameters(self, A_l=0.9, A_u=1.0, r_l=0.001, r_u=0.1, tao_1=0.1, tao_2=0.1, **ukwargs):\n\t\tAdaptiveBatAlgorithm.setParameters(self, **ukwargs)\n\t\tself.A_l, self.A_u, self.r_l, self.r_u, self.tao_1, self.tao_2 = A_l, A_u, r_l, r_u, tao_1, tao_2", "def test_default_alignment(self):\n # Should give same result than test_template_alignment\n reg = ElasticRegistration()\n register = reg.fit_transform(self.unimodal_samples)\n\n values = register([-.25, -.1, 0, .1, .25])\n\n expected = [[[0.599058], [0.997427], [0.772248],\n [0.412342], [0.064725]],\n [[0.626875], [0.997155], [0.791649],\n [0.382181], [0.050098]],\n [[0.620992], [0.997369], [0.785886],\n [0.376556], [0.048804]]]\n\n np.testing.assert_allclose(values, expected, atol=1e-4)", "def SetAlignment(self, l):\r\n\r\n self.alignment = l", "def write_settings(self, settings_file):\n lines = []\n lines.append(f'parameters.pixelSize = {self.pixel_size};')\n lines.append(f'parameters.wavelength = {self.wavelength};')\n lines.append(f'parameters.distance = {self.distance};')\n lines.append(f'parameters.unitCell = {list(self.lattice_settings)};')\n lines.append(f'parameters.ubMat = {str(self.UBmat.tolist())};')\n lines.append(f'parameters.oMat = {str(self.Omat.tolist())};')\n lines.append('parameters.oVec = [0,0,0];')\n lines.append(f'parameters.det0x = {self.xc};')\n lines.append(f'parameters.det0y = {self.yc};')\n lines.append('parameters.xTrans = [0,0,0];')\n lines.append(\n f'parameters.orientErrorDetPitch = 
{self.pitch * radians};')\n lines.append(f'parameters.orientErrorDetRoll = {self.roll * radians};')\n lines.append(f'parameters.orientErrorDetYaw = {self.yaw * radians};')\n lines.append(\n f'parameters.orientErrorGonPitch = {self.theta * radians};')\n lines.append('parameters.twoThetaCorrection = 0;')\n lines.append(f'parameters.twoThetaNom = 0;')\n lines.append(f'parameters.twoThetaStep = 0;')\n lines.append('parameters.omegaCorrection = 0;')\n lines.append(f'parameters.omegaNom = {self.omega * radians};')\n lines.append(f'parameters.omegaStep = 0;')\n lines.append('parameters.chiCorrection = 0;')\n lines.append(f'parameters.chiNom = {self.chi * radians};')\n lines.append(f'parameters.chiStep = 0;')\n lines.append('parameters.phiCorrection = 0;')\n lines.append(f'parameters.phiNom = {self.phi * radians};')\n lines.append(f'parameters.phiStep = {self.phi_step * radians};')\n lines.append(f'parameters.gridOrigin = {self.grid_origin};')\n lines.append(f'parameters.gridBasis = {self.grid_basis};')\n lines.append(f'parameters.gridDim = {self.grid_step};')\n lines.append('parameters.gridOffset = [0,0,0];')\n lines.append('parameters.extraFlip = false;')\n lines.append(f'outputData.dimensions = {list(self.grid_shape)};')\n lines.append('outputData.chunkSize = [50,50,50];')\n lines.append('outputData.compression = 0;')\n lines.append('transformer.transformOptions = 0;')\n lines.append('transformer.oversampleX = 1;')\n lines.append('transformer.oversampleY = 1;')\n lines.append('transformer.oversampleZ = 4;')\n with open(settings_file, 'w') as f:\n f.write('\\n'.join(lines))", "def extended_frame_annotation(self, original_frame):\n self.frame = self.annotated_frame(original_frame)\n text = \"\"\n if self.is_right():\n text = \"Looking right\"\n elif self.is_left():\n text = \"Looking left\"\n elif self.is_center():\n text = \"Looking center\"\n\n h_ratio = \"HR: \" + str(self.horizontal_ratio())[:4]\n v_ratio = \"VR: \" + str(self.vertical_ratio())[:4]\n\n width = int(0.9 * self.frame.shape[1])\n height = int(0.9 * self.frame.shape[0])\n\n # cv2.putText(self.frame, text, (60, 60), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n # cv2.putText(self.frame, h_ratio, (60, height), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n # cv2.putText(self.frame, v_ratio, (int(0.8 * width), height), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n return self.frame", "def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n joined_sam = open(os.path.join(args.output_dir, 'watson_joinedAligned.out.sam'))\n merged_sam = open(os.path.join(args.output_dir, 'watson_mergedAligned.out.sam'))\n for line in joined_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n else:\n break\n for line in merged_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args", "def setActuatorParams(self, r1 = 1 , r2 = 10 , ja = 1 , ba = 1 ):\n \n # Gear ratio\n self.R = [np.diag([r1,r1]),np.diag([r1,r2]),np.diag([r2,r1]),np.diag([r2,r2])]\n \n # Inertia\n self.Ia = np.diag([ja,ja])\n \n # Damping (linear)\n self.Da = np.diag([ba,ba])", "def setheaders(f):\n f.headers['OBSERVER'] = \"'%s'\" % camera.status.observer\n f.headers['FILTERID'] = \"'%s'\" % 
filtname(camera.status.filter)\n f.headers['FILTER'] = \"%1d\" % camera.status.filter\n f.headers['XYSTAGE'] = \"'%d,%d'\" % camera.status.guider\n f.headers['MIRROR'] = \"'%s'\" % camera.status.mirror\n if camera.status.imgtype == 'BIAS':\n f.headers['BIAS'] = camera.status.object\n elif camera.status.imgtype == 'DARK':\n f.headers['DARK'] = camera.status.object\n else:\n f.headers['OBJECT'] = camera.status.object\n try:\n skytemp = weather.status.skytemp\n f.headers['SKYTEMP'] = \"%4.1f\" % skytemp\n f.comments['SKYTEMP'] = \"'Infrared sky temp in degC'\"\n except:\n pass\n\n try:\n if not camera.status.TJ.current.posviolate: #Position calibrated to epoch\n ra = camera.status.TJ.current.Ra/15/3600\n dec = camera.status.TJ.current.Dec/3600\n epoch = camera.status.TJ.current.Epoch\n alt = camera.status.TJ.current.Alt\n GotTJ = True\n elif camera.status.TJ.current.RaC:\n ra = camera.status.TJ.current.RaC\n dec = camera.status.TJ.current.DecC\n alt = camera.status.TJ.current.Alt\n t = time.gmtime()\n epoch = t.tm_year + (t.tm_yday/366.0)\n GotTJ = True\n else:\n GotTJ = False\n except AttributeError:\n GotTJ = False \n if GotTJ:\n f.headers['RA_OBJ'] = \"%12.9f\" % (ra*15.0)\n f.headers['RA'] = \"'%s'\" % sexstring(ra)\n f.headers['DEC_OBJ'] = \"%13.9f\" % dec\n f.headers['DEC'] = \"'%s'\" % sexstring(dec)\n f.headers['EQUINOX'] = \"%6.1f\" % epoch\n f.headers['SECZ'] = \"%6.3f\" % (1/math.cos((90-alt)*math.pi/180))\n if GotFT:\n hjd,message = fitstime.findtime(fimage=f, verbose=0, allfields=0)\n if type(hjd) == float:\n f.headers['HJD'] = \"%f\" % hjd\n f.comments['HJD'] = \"Heliocentric Julian Day at exposure midpoint\"", "def __init__(self, parameter):\n self.width = WIDTH_COEF * parameter\n self.high = HIGH_COEF * parameter\n self.horizontal_border = [[BORDER_TYPE] + [BORDER_TYPE] * self.width + [BORDER_TYPE]]\n self.empy_body = [[BORDER_TYPE] + [\" \"] * self.width + [BORDER_TYPE] for i in range(self.high)]\n self.flag = self.horizontal_border + self.empy_body + self.horizontal_border", "def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. 
Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)", "def __init__(self, standardize=True, hdr_alpha=0.05, tr=0.720, low_pass=None, high_pass=None, s_filter=False):\n\n self.standardize = standardize\n self.s_filter = s_filter\n self.low_pass = low_pass\n self.high_pass = high_pass\n self.tr = tr\n self.hdr_alpha = hdr_alpha", "def from_alignment(self, alignment, image=None):\n self.x = alignment[\"x\"]\n self.w = alignment[\"w\"]\n self.y = alignment[\"y\"]\n self.h = alignment[\"h\"]\n self.frame_dims = alignment[\"frame_dims\"]\n self.landmarksXY = alignment[\"landmarksXY\"]\n if image.any():\n self.image_to_face(image)", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def initializeParameters(self):\r\n\t\tself.input_raster.enabled = True\r\n\t\tself.approach.enabled = True\r\n\t\tself.predefined_pattern.enabled = False\r\n\t\tself.predefined_pattern.value = 'Mexican Hat wavelet'\r\n\t\tself.pattern_workspace.enabled = False\r\n\t\tself.point_matrix_size.enabled = False\r\n\t\tself.point_matrix_size.value = 3\r\n\t\tself.point_vectors.enabled = False\r\n\t\tself.mapping_field.enabled = False\r\n\t\tself.move_to_max.enabled = 
False\r\n\t\tself.move_to_max_distance.enabled = False\r\n\t\tself.move_to_max_distance.value = 3\r\n\t\tself.mh_iteration.enabled = False\r\n\t\tself.mh_dil_val.enabled = False\r\n\t\tself.mh_dil_val.value = 1\r\n\t\tself.mh_dil_start.value = 0.01\r\n\t\tself.mh_dil_stop.value = 1\r\n\t\tself.mh_dil_step.value = 0.1\r\n\t\tself.mh_dil_start.enabled = False\r\n\t\tself.mh_dil_stop.enabled = False\r\n\t\tself.mh_dil_step.enabled = False\r\n\t\tself.transform.enabled = False\r\n\t\tself.size_of_the_cell.enabled = False\r\n\t\tself.size_of_the_cell.value = 1\r\n\t\tself.output_sim_matrix.enabled = False\r\n\t\tself.output_table.enabled = False\r\n\t\tself.output_raster_workspace.enabled = False", "def __init__(\n self,\n arg=None,\n align=None,\n alignsrc=None,\n fill=None,\n font=None,\n format=None,\n formatsrc=None,\n height=None,\n line=None,\n prefix=None,\n prefixsrc=None,\n suffix=None,\n suffixsrc=None,\n values=None,\n valuessrc=None,\n **kwargs\n ):\n super(Header, self).__init__(\"header\")\n\n # Validate arg\n # ------------\n if arg is None:\n arg = {}\n elif isinstance(arg, self.__class__):\n arg = arg.to_plotly_json()\n elif isinstance(arg, dict):\n arg = _copy.copy(arg)\n else:\n raise ValueError(\n \"\"\"\\\nThe first argument to the plotly.graph_objs.table.Header \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.table.Header`\"\"\"\n )\n\n # Handle skip_invalid\n # -------------------\n self._skip_invalid = kwargs.pop(\"skip_invalid\", False)\n\n # Import validators\n # -----------------\n from plotly.validators.table import header as v_header\n\n # Initialize validators\n # ---------------------\n self._validators[\"align\"] = v_header.AlignValidator()\n self._validators[\"alignsrc\"] = v_header.AlignsrcValidator()\n self._validators[\"fill\"] = v_header.FillValidator()\n self._validators[\"font\"] = v_header.FontValidator()\n self._validators[\"format\"] = v_header.FormatValidator()\n self._validators[\"formatsrc\"] = v_header.FormatsrcValidator()\n self._validators[\"height\"] = v_header.HeightValidator()\n self._validators[\"line\"] = v_header.LineValidator()\n self._validators[\"prefix\"] = v_header.PrefixValidator()\n self._validators[\"prefixsrc\"] = v_header.PrefixsrcValidator()\n self._validators[\"suffix\"] = v_header.SuffixValidator()\n self._validators[\"suffixsrc\"] = v_header.SuffixsrcValidator()\n self._validators[\"values\"] = v_header.ValuesValidator()\n self._validators[\"valuessrc\"] = v_header.ValuessrcValidator()\n\n # Populate data dict with properties\n # ----------------------------------\n _v = arg.pop(\"align\", None)\n self[\"align\"] = align if align is not None else _v\n _v = arg.pop(\"alignsrc\", None)\n self[\"alignsrc\"] = alignsrc if alignsrc is not None else _v\n _v = arg.pop(\"fill\", None)\n self[\"fill\"] = fill if fill is not None else _v\n _v = arg.pop(\"font\", None)\n self[\"font\"] = font if font is not None else _v\n _v = arg.pop(\"format\", None)\n self[\"format\"] = format if format is not None else _v\n _v = arg.pop(\"formatsrc\", None)\n self[\"formatsrc\"] = formatsrc if formatsrc is not None else _v\n _v = arg.pop(\"height\", None)\n self[\"height\"] = height if height is not None else _v\n _v = arg.pop(\"line\", None)\n self[\"line\"] = line if line is not None else _v\n _v = arg.pop(\"prefix\", None)\n self[\"prefix\"] = prefix if prefix is not None else _v\n _v = arg.pop(\"prefixsrc\", None)\n self[\"prefixsrc\"] = prefixsrc if prefixsrc is not None else _v\n _v = arg.pop(\"suffix\", None)\n 
self[\"suffix\"] = suffix if suffix is not None else _v\n _v = arg.pop(\"suffixsrc\", None)\n self[\"suffixsrc\"] = suffixsrc if suffixsrc is not None else _v\n _v = arg.pop(\"values\", None)\n self[\"values\"] = values if values is not None else _v\n _v = arg.pop(\"valuessrc\", None)\n self[\"valuessrc\"] = valuessrc if valuessrc is not None else _v\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**dict(arg, **kwargs))\n\n # Reset skip_invalid\n # ------------------\n self._skip_invalid = False", "def affine_params(key, o, u, ifactor=1.0):\n keys = random.split(key, 2)\n ifactor = ifactor / np.sqrt(u)\n return {'w' : random.normal(keys[0], (o, u)) * ifactor,\n 'b' : np.zeros((o,))}", "def align(self):\n return self[\"align\"]", "def align(self):\n return self[\"align\"]", "def align(self):\n return self[\"align\"]", "def _addStatsHeadersToMatrix(self, m):\n\n atoz = \"JKLMNOPQRSTUVWXYZABCDEFGHI\"\n\n counter = 0\n\n for col in m.TopAxis.DataMembers:\n if counter < 26:\n logicalletter = str(atoz[counter])\n col.MemberSigTestHeading = logicalletter\n counter += 1\n else:\n counter = 0", "def generate_anchors(self):\n self.anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)\n size = self.stride * self.stride\n count = 0\n for r in self.ratios:\n ws = int(math.sqrt(size * 1. / r))\n hs = int(ws * r)\n\n for s in self.scales:\n w = ws * s\n h = hs * s\n self.anchors[count][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:]\n count += 1", "def override_config(args):\n args.transformer_enc_config = (\n \"((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3\"\n )", "def override_config(args):\n args.transformer_enc_config = (\n \"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3\"\n )", "def override_config(args):\n args.transformer_enc_config = (\n \"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3\"\n )", "def test_align(self):\n al = align(self.amp1, self.amp2).m\n\n # Both objects are already centered, so should be close to origin (allowing for some inaccuracy)\n self.assertAlmostEqual(al.vert.mean(axis=0)[0], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[1], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[2], 0, delta=TestAlign.DELTA)", "def SetToolAlignment(self, alignment=wx.EXPAND):\r\n\r\n self._tool_alignment = alignment", "def putCoordInfo(self, hdu):\n\n hdu.header.update(\"CDELT1\", self.cdelt, \"pixel size, degrees\")\n hdu.header.update(\"CDELT2\", self.cdelt, \"pixel size, degrees\")\n shape = hdu.data.shape\n crpix1 = (shape[0] + 1) // 2 + 1\n crpix2 = (shape[1] + 1) // 2 + 1\n hdu.header.update(\"CRPIX1\", float(crpix1), \"reference pixel\")\n hdu.header.update(\"CRPIX2\", float(crpix2), \"reference pixel\")", "def __init__(self):\n Page.__init__(self, u\"Esfera, parametrización por proyecciones estereográficas\")\n\n r = .998\n esf = ParametricPlot3D(lambda t, f: (r * sin(t) * cos(f), r * sin(t) * sin(f), r * cos(t)), (0, pi, 70), (0, 2 * pi, 70))\n# esf.setAmbientColor(_1(99,136,63))\n esf.setDiffuseColor(_1(99, 136, 63))\n esf.setSpecularColor(_1(99, 136, 63))\n\n\n def proyZm1(u, v, t1):\n \"\"\"proy desde el polo norte al plano z=-1\"\"\"\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)\n\n def proyZ1(u, v, t2):\n \"\"\"proy desde el polo sur al plano z=1\"\"\"\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)\n\n 
stereo = ParametricPlot3D(proyZm1, (-3, 3, 70), (-3, 3, 70))\n stereo.setLinesVisible(True)\n stereo.setMeshVisible(False)\n stereo.setMeshDiffuseColor(_1(117, 55, 79))\n\n stereo2 = ParametricPlot3D(proyZ1, (-3, 3, 70), (-3, 3, 70))\n stereo2.setLinesVisible(True)\n stereo2.setMeshVisible(False)\n stereo2.setMeshDiffuseColor(_1(80, 87, 193))\n stereo2.setTransparency(0.5)\n stereo2.setTransparencyType(8)\n\n\n baseplane = BasePlane()\n baseplane.setHeight(-1.005)\n baseplane.setRange((-4, 4, 7))\n self.addChild(esf)\n self.addChild(stereo2)\n self.addChild(stereo)\n self.addChild(baseplane)\n\n params = [stereo,stereo2]\n\n ## no queremos los controles\n for i,p in enumerate(params):\n p.parameters['t%d' % (i+1)].hide()\n\n anims = [p.parameters['t%d' % (i+1)].asAnimation() for i,p in enumerate(params)]\n self.setupAnimations(anims)", "def set_TranslationsInTiltSeries(self, TiltSeries_):\n for (kk, Proj) in enumerate(TiltSeries_._ProjectionList):\n Proj._alignmentTransX = self._alignmentTransX[kk]\n Proj._alignmentTransY = self._alignmentTransY[kk]", "def CalcAtmTransmissionForImage(img, header='', chanInfo='', airmass=1.5,pwv=-1, \n spectralaxis=-1, \n value='transmission', P=-1, H=-1, \n T=-1, altitude=-1):\n if (header == ''):\n print \"imhead\", # the comma prevents the newline so that ...10...20 will be on same line\n header = imhead(img,mode='list')\n if (type(header) != dict):\n # Input was a spectrum rather than an image\n if (chanInfo[1] < 60e9):\n telescopeName = 'ALMA'\n else:\n telescopeName = 'VLA'\n else:\n telescopeName = header['telescope']\n # this will not match up with the plot, which uses numberOfChannelsInCube\n# freqs = getFreqsForImage(img, header, spectralaxis)\n freqs = np.linspace(chanInfo[1]*1e-9,chanInfo[2]*1e-9,chanInfo[0])\n# print \"freqs: %f-%f\" % (freqs[0], freqs[-1])\n numchan = len(freqs)\n lsrkwidth = (chanInfo[2] - chanInfo[1])/(numchan-1)\n result = cubeLSRKToTopo(img, nchan=numchan, f0=chanInfo[1], f1=chanInfo[2], chanwidth=lsrkwidth)\n if (result is None):\n topofreqs = freqs\n else:\n topoWidth = (result[1]-result[0])/(numchan-1)\n topofreqs = np.linspace(result[0], result[1], chanInfo[0]) * 1e-9\n casalogPost(\"Converted LSRK range (%f-%f) to TOPO (%f-%f) over %d channels\" % (chanInfo[1]*1e-9, chanInfo[2]*1e-9,topofreqs[0],topofreqs[-1],numchan))\n P0 = 1000.0 # mbar\n H0 = 20.0 # percent\n T0 = 273.0 # Kelvin\n if (telescopeName.find('ALMA') >= 0 or telescopeName.find('ACA') >= 0):\n pwv0 = 1.0 \n P0 = 563.0\n H0 = 20.0\n T0 = 273.0\n altitude0 = 5059\n elif (telescopeName.find('VLA') >= 0):\n P0 = 786.0\n pwv0 = 5.0 \n altitude0 = 2124\n else:\n pwv0 = 10.0 \n altitude0 = 0\n if (pwv < 0):\n pwv = pwv0\n if (T < 0):\n T = T0\n if (H < 0):\n H = H0\n if (P < 0):\n P = P0\n if (altitude < 0):\n altitude = altitude0\n tropical = 1\n midLatitudeSummer = 2\n midLatitudeWinter = 3\n# print \"image bandwidth = %f GHz\" % (np.max(freqs)-np.min(freqs))\n reffreq = np.mean(topofreqs)\n numchanModel = numchan*1\n chansepModel = (topofreqs[-1]-topofreqs[0])/(numchanModel-1)\n# print \"regridded bandwidth=%f GHz, chansep=%f, reffreq=%f\" % (np.max(topofreqs)-np.min(topofreqs), chansepModel, reffreq)\n nbands = 1\n myqa = createCasaTool(qatool)\n fCenter = create_casa_quantity(myqa, reffreq, 'GHz')\n fResolution = create_casa_quantity(myqa, chansepModel, 'GHz')\n fWidth = create_casa_quantity(myqa, numchanModel*chansepModel, 'GHz')\n myat = casac.atmosphere()\n myat.initAtmProfile(humidity=H, temperature=create_casa_quantity(myqa,T,\"K\"),\n 
altitude=create_casa_quantity(myqa,altitude,\"m\"),\n pressure=create_casa_quantity(myqa,P,'mbar'),atmType=midLatitudeWinter)\n myat.initSpectralWindow(nbands, fCenter, fWidth, fResolution)\n myat.setUserWH2O(create_casa_quantity(myqa, pwv, 'mm'))\n# myat.setAirMass() # This does not affect the opacity, but it does effect TebbSky, so do it manually.\n myqa.done()\n\n dry = np.array(myat.getDryOpacitySpec(0)[1])\n wet = np.array(myat.getWetOpacitySpec(0)[1]['value'])\n TebbSky = myat.getTebbSkySpec(spwid=0)[1]['value']\n # readback the values to be sure they got set\n \n rf = myat.getRefFreq()['value']\n cs = myat.getChanSep()['value']\n if (myat.getRefFreq()['unit'] != 'GHz'):\n casalogPost(\"There is a unit mismatch for refFreq in the atm code.\")\n if (myat.getChanSep()['unit'] != 'MHz'):\n casalogPost(\"There is a unit mismatch for chanSep in the atm code.\")\n numchanModel = myat.getNumChan()\n freq0 = myat.getChanFreq(0)['value']\n freq1 = myat.getChanFreq(numchanModel-1)['value']\n# print \"atm returned bandwidth = %f GHz = %f to %f \" % (freq1-freq0, freq0, freq1)\n newfreqs = np.linspace(freqs[0], freqs[-1], numchanModel) # fix for SCOPS-4815\n# print \"freqs: %f-%f newfreqs: %f-%f\" % (freqs[0], freqs[-1], newfreqs[0], newfreqs[-1])\n transmission = np.exp(-airmass*(wet+dry))\n TebbSky *= (1-np.exp(-airmass*(wet+dry)))/(1-np.exp(-wet-dry))\n if value=='transmission':\n values = transmission\n else:\n values = TebbSky\n del myat\n return(newfreqs, values)", "def update_view( angle, data ):\r\n global offset, use_outer_line, use_line\r\n\r\n #reset the point display\r\n point.pos[angle] = vector( 0, 0, 0 )\r\n pointb.pos[angle] = vector( 0, 0, 0 )\r\n point2.pos[angle] = vector( 0, 0, 0 )\r\n point2b.pos[angle] = vector( 0, 0, 0 )\r\n\r\n #unpack data using the denomination used during the discussions\r\n x = data[0]\r\n x1= data[1]\r\n x2= data[2]\r\n x3= data[3]\r\n \r\n angle_rad = angle * pi / 180.0\r\n c = cos(angle_rad)\r\n s = -sin(angle_rad)\r\n\r\n dist_mm = x | (( x1 & 0x3f) << 8) # distance is coded on 13 bits ? 
14 bits ?\r\n quality = x2 | (x3 << 8) # quality is on 16 bits\r\n\r\n dist_x = dist_mm*c\r\n dist_y = dist_mm*s\r\n\r\n if not use_lines : lines[angle].pos[1]=(offset*c,0,offset*s)\r\n if not use_outer_line :\r\n outer_line.pos[angle]=(offset*c,0,offset*s)\r\n outer_line.color[angle] = (0.1, 0.1, 0.2)\r\n \r\n \r\n # display the sample\r\n if x1 & 0x80: # is the flag for \"bad data\" set?\r\n # yes it's bad data\r\n lines[angle].pos[1]=(offset*c,0,offset*s)\r\n outer_line.pos[angle]=(offset*c,0,offset*s)\r\n outer_line.color[angle] = (0.1, 0.1, 0.2)\r\n else:\r\n # no, it's cool\r\n if not x1 & 0x40:\r\n # X+1:6 not set : quality is OK\r\n if use_points : point.pos[angle] = vector( dist_x,0, dist_y)\r\n if use_intensity : point2.pos[angle] = vector( (quality + offset)*c,0, (quality + offset)*s)\r\n if use_lines : lines[angle].color[1] = (1,0,0)\r\n if use_outer_line : outer_line.color[angle] = (1,0,0)\r\n else:\r\n # X+1:6 set : Warning, the quality is not as good as expected\r\n if use_points : pointb.pos[angle] = vector( dist_x,0, dist_y)\r\n if use_intensity : point2b.pos[angle] = vector( (quality + offset)*c,0, (quality + offset)*s)\r\n if use_lines : lines[angle].color[1] = (0.4,0,0)\r\n if use_outer_line : outer_line.color[angle] = (0.4,0,0)\r\n if use_lines : lines[angle].pos[1]=( dist_x, 0, dist_y)\r\n if use_outer_line : outer_line.pos[angle]=( dist_x, 0, dist_y)", "def __init__(self,\n mass,\n width,\n x0_1, x0_2,\n v0_1=0, v0_2=0,\n h=0.01):\n super().__init__(mass, mass,\n width, width,\n x0_1, x0_2,\n v0_1, v0_2, h)" ]
[ "0.5652911", "0.5642781", "0.5243032", "0.51255065", "0.5104375", "0.5091596", "0.5088672", "0.5071364", "0.50606203", "0.5045389", "0.5044001", "0.50383675", "0.50192225", "0.5016494", "0.5012836", "0.50028354", "0.49517918", "0.49404538", "0.4933246", "0.49027255", "0.48972568", "0.48952663", "0.48940238", "0.48893216", "0.48852566", "0.48648822", "0.48583472", "0.4858004", "0.48541793", "0.4842759", "0.48401117", "0.48356426", "0.48333356", "0.48012313", "0.47841403", "0.4783138", "0.4780045", "0.47737548", "0.47735754", "0.476997", "0.47606415", "0.47582534", "0.47435507", "0.47360128", "0.47342056", "0.47281554", "0.47237122", "0.47227535", "0.47211555", "0.47158", "0.47101736", "0.47060028", "0.47041813", "0.46833602", "0.46820414", "0.46745887", "0.46687764", "0.4663258", "0.46611565", "0.46584898", "0.46494237", "0.46491542", "0.4648875", "0.4648762", "0.46485084", "0.46410447", "0.46392834", "0.4631659", "0.4630679", "0.4628798", "0.46282774", "0.46256056", "0.46233457", "0.4619161", "0.46190134", "0.46130645", "0.46065858", "0.4605852", "0.4605576", "0.4605304", "0.46037388", "0.46036464", "0.45960897", "0.45957106", "0.45914719", "0.45914719", "0.45914719", "0.45907027", "0.45732406", "0.45683312", "0.45665857", "0.45665857", "0.45615727", "0.45613572", "0.4560963", "0.4559183", "0.45572442", "0.4553753", "0.4548464", "0.45458347" ]
0.55857396
2
Retrieve 3D alignment parameters from the header: phi, theta, psi, tx, ty, tz, mirror, scale
def get_params3D(ima, xform = "xform.align3d"):
	t = ima.get_attr(xform)
	d = t.get_params("spider")
	return d["phi"],d["theta"],d["psi"],d["tx"],d["ty"],d["tz"],d["mirror"],d["scale"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = np.dot(h0_2, h2_3)\n return h0_3", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def fk3(joint_rotations):\n h0_3 = htm0_3(joint_rotations)\n x0_3 = h0_3[0, 3]\n y0_3 = h0_3[1, 3]\n z0_3 = h0_3[2, 3]\n d0_3 = [x0_3, y0_3, z0_3]\n return d0_3", "def get_M(self, theta, phi, gamma, dx, dy, dz):\n w = self.width\n h = self.height\n f = self.focal\n # Projection 2D -> 3D matrix\n A1 = np.array([[1, 0, -w / 2],\n [0, 1, -h / 2],\n [0, 0, 1],\n [0, 0, 1]])\n # Rotation matrices around the X, Y, and Z axis\n RX = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n RY = np.array([[np.cos(phi), 0, -np.sin(phi), 0],\n [0, 1, 0, 0],\n [np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],\n [np.sin(gamma), np.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n # Composed rotation matrix with (RX, RY, RZ)\n R = np.dot(np.dot(RX, RY), RZ)\n # Translation matrix\n T = np.array([[1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n # Projection 3D -> 2D matrix\n A2 = np.array([[f, 0, w / 2, 0],\n [0, f, h / 2, 0],\n [0, 0, 1, 0]])\n # Final transformation matrix\n return np.dot(A2, np.dot(T, np.dot(R, A1)))", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 
'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def get_preamble_z(self):\n a = PhysicalLayer.get_preamble()\n return 2,np.array([z for z in a['symb'][0:31] for _ in range(self._sps)])", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def extract3d(xaxis, yaxis, zaxis, dat3d, crd_sys, xvec,yvec, zvec, pad=0.):\n func = RegularGridInterpolator((xaxis, yaxis, zaxis), dat3d, \n method='linear', bounds_error=False, fill_value=pad)\n\n # convert x,y,z coordinates to spherical coordinates\n if crd_sys == 'car':\n profx = xvec\n profy = yvec\n profz = zvec\n elif crd_sys == 'sph':\n # radius\n profx = np.sqrt(xvec**2 + yvec**2 + zvec**2)\n\n # theta\n tvec = np.arctan2(zvec, np.sqrt(xvec**2 + yvec**2))\n reg = tvec < 0.\n tvec[reg] = tvec[reg] + 2.*np.pi\n profy = tvec\n\n # azimuth\n pvec = np.arctan2(yvec, xvec)\n reg = pvec < 0\n pvec[reg] = 
pvec[reg] + 2*np.pi\n profz = pvec\n\n nvec = len(xvec)\n prof = np.zeros([nvec], dtype=np.float64)\n for ii in range(nvec):\n prof[ii] = func([profx[ii], profy[ii], profz[ii]])\n\n return prof", "def _get_quaternion_data(self, msg):\n alpha, beta, gamma = PIDController.get_euler_angle_from_quat(msg.quaternion.w, msg.quaternion.x,\n msg.quaternion.y, msg.quaternion.z)\n self._actual_euler[\"alpha\"], self._actual_euler[\"beta\"], self._actual_euler[\"gamma\"] \\\n = alpha, beta, gamma", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def parameters(self):\n # encoded in θ\n return self.theta.columns", "def get_params_proj(ima, xform = \"xform.projection\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],-d[\"tx\"],-d[\"ty\"]", "def getOptimizableVariables(self, TiltAlignmentParameters_):\n ntilt = self._ntilt\n nmark = len(self._Markers)\n\n nopti = (nmark - 1) * 3\n\n # translation\n if self.optimizeMarkerPositions:\n nopti += (ntilt) * 2\n\n # variable magnifications for projections\n if TiltAlignmentParameters_.dmag:\n nopti = nopti + ntilt - 1\n\n #check that irefmark and ireftilt are set properly\n if not (TiltAlignmentParameters_.irefmark in range(nmark)):\n TiltAlignmentParameters_.irefmark = 0\n print(\"Warning: irefmark must be 1<= irefmark <=nmark\")\n print(\"New irefmark: \" + str(TiltAlignmentParameters_.irefmark))\n\n if not (TiltAlignmentParameters_.ireftilt in self._projIndices.astype(int)):\n TiltAlignmentParameters_.ireftilt = abs(self._tiltAngles).argmin()\n print(\"Warning: ireftilt must be in range of projection indices\")\n print(\"New ireftilt: \" + str(TiltAlignmentParameters_.ireftilt))\n\n #variable rotation for projections\n if TiltAlignmentParameters_.drot:\n nopti = nopti + ntilt\n else:\n nopti = nopti + 1\n\n # beam tilt\n if TiltAlignmentParameters_.dbeam:\n nopti = nopti + 1\n\n ## gradient on image rotation and magnification in projections\n #if TiltAlignmentParameters_.dGradRotMag:\n # nopti = nopti + 2\n\n\n # nopti += ntilt\n\n optimizableVariables = numpy.zeros((nopti), dtype='float')\n\n # marker 3D coords\n\n ivar = 0\n for (imark, Marker) in enumerate(self._Markers):\n # reference marker irefmark is fixed to standard value\n if ((imark ) != TiltAlignmentParameters_.irefmark):\n r = Marker.get_r()\n optimizableVariables[ivar] = r[0]\n optimizableVariables[ivar + 1] = r[1]\n optimizableVariables[ivar + 2] = r[2]\n ivar = ivar + 3\n\n # translations\n if self.optimizeMarkerPositions:\n for itilt in range(0, ntilt):\n # translation in reference projection is zero\n #if self._projIndices[itilt] != TiltAlignmentParameters_.ireftilt:\n optimizableVariables[ivar] = self._alignmentTransX[itilt]\n optimizableVariables[ivar + 1] = self._alignmentTransY[itilt]\n ivar = ivar + 2\n\n # magnification changes\n if TiltAlignmentParameters_.dmag:\n for itilt in range(0, ntilt):\n # magnification of reference projection is 1.\n if int(self._projIndices[itilt]) != TiltAlignmentParameters_.ireftilt:\n optimizableVariables[ivar] = self._alignmentMagnifications[itilt]\n ivar = ivar + 1\n\n # image rotations\n if TiltAlignmentParameters_.drot:\n for itilt in range(0, ntilt):\n optimizableVariables[ivar] = self._alignmentRotations[itilt]\n ivar = ivar + 1\n\n # all rotations are the same - take the first one\n else:\n 
optimizableVariables[ivar] = self._alignmentRotations[0]\n ivar = ivar + 1\n\n # beam inclination\n if TiltAlignmentParameters_.dbeam:\n optimizableVariables[ivar] = self._alignmentBeamTilt\n ivar = ivar + 1\n\n # focus gradient (TODO)\n #if TiltAlignmentParameters_.dGradRotMag:\n # optimizableVariables[ivar] = self._alignmentMagnFoc\n # optimizableVariables[ivar+1] = self._alignmentRotFoc\n\n # for i in range(ntilt):\n # optimizableVariables[ivar] = -1\n # ivar += 1\n\n return optimizableVariables", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def lattice_parameters(self):\n return self.a, self.b, self.c, self.alpha, self.beta, self.gamma", "def get_pars(self):\n return [self.z, self.b, self.logN]", "def parameters(self, t):\n if self.fixed_params:\n return (self.As[:, :, 0], self.Bs[:, :, 0], self.Cs[:, :, 0], self.Ds[:, :, 0],\n self.Qs[:, :, 0], self.Rs[:, :, 0])\n\n return (self.As[:, :, t + 1], self.Bs[:, :, t + 1], self.Cs[:, :, t + 1], self.Ds[:, :, t + 1],\n self.Qs[:, :, t + 1], self.Rs[:, :, t + 1])", "def euler2rot3D(psi, theta, phi):\n Rphi = np.array([[np.cos(phi), np.sin(phi), 0],\n [-np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n Rtheta = np.array([[np.cos(theta), 0, -np.sin(theta)],\n [0, 1, 0],\n [np.sin(theta), 0, np.cos(theta)]])\n Rpsi = np.array([[np.cos(psi), np.sin(psi), 0],\n [-np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(Rpsi, np.dot(Rtheta, Rphi))", "def get_acquisition_pars(theta=None, phi=None, shift=None, nx=None, ny=None, cfg=None):\n # ss_rect_map = {(13, 13): 1E7, (13, 14): 1E7, (13, 15): 1E7, (13, 16): 1E7, (13, 17): 1E7,\n # (14, 13): 1E7, (14, 14): 1E5, (14, 15): 1E5, (14, 16): 1E5, (14, 17): 1E7,\n # (15, 13): 1E7, (15, 14): 1E5, (15, 15): 5E4, (15, 16): 1E5, (15, 17): 1E7,\n # (16, 13): 1E7, (16, 14): 1E5, (16, 15): 1E5, (16, 16): 1E5, (16, 17): 1E7,\n # (17, 13): 1E7, (17, 14): 1E7, (17, 15): 1E7, (17, 16): 1E7, (17, 17): 1E7}\n nmeans_dict = {(15, 15): 1,\n(16, 15): 1,\n(16, 16): 1,\n(15, 16): 1,\n(14, 16): 1,\n(14, 15): 1,\n(14, 14): 1,\n(15, 14): 1,\n(16, 14): 1,\n(17, 14): 2,\n(17, 15): 1,\n(17, 16): 1,\n(17, 17): 2,\n(16, 17): 1,\n(15, 17): 1,\n(14, 17): 1,\n(13, 17): 1,\n(13, 16): 2,\n(13, 15): 1,\n(13, 14): 1,\n(13, 13): 5,\n(14, 13): 2,\n(15, 13): 1,\n(16, 13): 5,\n(17, 13): 5,\n(18, 13): 5,\n(18, 14): 5,\n(18, 15): 5,\n(18, 16): 5,\n(18, 17): 5,\n(18, 18): 5,\n(17, 18): 5,\n(16, 18): 2,\n(15, 18): 1,\n(14, 18): 1,\n(13, 18): 5,\n(12, 18): 5,\n(12, 17): 5,\n(12, 16): 5,\n(12, 15): 5,\n(12, 14): 5,\n(12, 13): 5,\n(12, 12): 5,\n(13, 12): 5,\n(14, 12): 5,\n(15, 12): 5,\n(16, 12): 5,\n(17, 12): 5,\n(18, 
12): 5,\n(19, 12): 5,\n(19, 13): 5,\n(19, 14): 5,\n(19, 15): 5,\n(19, 16): 5,\n(19, 17): 5,\n(19, 18): 5,\n(19, 19): 5,\n(18, 19): 5,\n(17, 19): 5,\n(16, 19): 5,\n(15, 19): 5,\n(14, 19): 5,\n(13, 19): 5,\n(12, 19): 5,\n(11, 19): 5,\n(11, 18): 5,\n(11, 17): 5,\n(11, 16): 5,\n(11, 15): 5,\n(11, 14): 5,\n(11, 13): 5,\n(11, 12): 5,\n(11, 11): 5,\n(12, 11): 5,\n(13, 11): 5,\n(14, 11): 5,\n(15, 11): 5,\n(16, 11): 5,\n(17, 11): 5,\n(18, 11): 5,\n(19, 11): 5}\n\n\n\n # led_center = 15\n # led_disp = (int(cfg.array_size)+1)//2\n # led_range = range(led_center-led_disp, led_center+led_disp)\n # ledmap = product(led_range, led_range)\n #\n # ss_dict = {}\n # for led in ledmap:\n # # if led == [15, 15]:\n # # ss_dict[(led[0], led[1])] = 60E4\n # # else:\n # dist = (np.abs(led[0]-15)**2+np.abs(led[1]-15))\n # ss = 5.E5*(1+.5*dist)\n # ss_dict[(led[0], led[1])] = ss\n # if ss >3E6:\n # ss_dict[(led[0], led[1])] = 3E6\n\n power = 255\n # Camera parameters\n if nx is not None:\n # if nx == 14 or nx == 15 or nx ==16 or ny == 15 or ny ==16 or ny == 14:\n # shutter_speed = 50000\n # else:\n # shutter_speed = 500000\n # nmeans = nmeans_dict[nx, ny]\n # if [nx, ny] in [[15, 15], [15, 16], [14, 17], [14,16], [14, 15],\n # [14, 14], [13,16], [13, 15]]:\n # shutter_speed = 100000\n # nmeans = 1\n # else:\n # shutter_speed = 600000\n # nmeans = 1\n\n try:\n # shutter_speed = ss_dict[nx, ny]\n shutter_speed = 50000\n nmeans = nmeans_dict[nx, ny]\n except:\n shutter_speed = 1E5\n nmeans = 1\n return float(cfg.iso), shutter_speed, power, nmeans\n\n shutter_speed_min = cfg.shutter_speed[0]\n shutter_speed_max = cfg.shutter_speed[0]\n if phi == None:\n if shift == None:\n raise Exception(\"Must assign a value either for phi or shift.\")\n shutter_speed = translate(phi, 0, cfg.shift_max,\n shutter_speed_min, shutter_speed_max)\n else:\n shutter_speed = translate(phi, 0, 90,\n shutter_speed_min, shutter_speed_max)\n # Led parameters\n led_power = cfg.max_led_power\n return cfg.iso, shutter_speed, led_power, nmeans", "def T(params, phi):\n\t# handle the base frame, eqn 3.9, p36\n\tt = np.array([\n\t\t[cos(phi), -sin(phi), 0.0, 0.0],\n\t\t[sin(phi), cos(phi), 0.0, 0.0],\n\t\t[0.0, 0.0, 1.0, 0.0],\n\t\t[0.0, 0.0, 0.0, 1.0]\n\t])\n\tfor i, p in enumerate(params):\n\t\tt = t.dot(rot(*p))\n\treturn t", "def get_image_plane_metadata(self, p, c, z):\n coord_str = f'({p}, 0, {c}, {z})'\n return self.store.attrs.get('ImagePlaneMetadata').get(coord_str)", "def get_vector_field(self, key):\n f1 = self.get_field(key + '1')\n f2 = self.get_field(key + '2')\n f3 = self.get_field(key + '3')\n x, y, z = self.get_cell_center_coordinates()\n if self.geometry == 'cartesian':\n fx = f1\n fy = f2\n fz = f3\n elif self.geometry == 'spherical':\n \"\"\"\n http://en.wikipedia.org/wiki/\n List_of_common_coordinate_transformations#To_Spherical_coordinates\n \"\"\"\n r2 = x**2 + y**2 + z**2\n rh = [x/r2**0.5, y/r2**0.5, z/r2**0.5]\n th = [x*z/r2*(x**2 + y**2)**-0.5,\n y*z/r2*(x**2 + y**2)**-0.5,\n -1./r2*(x**2 + y**2)**+0.5] \n ph = [-y/(x**2 + y**2)+0*r2, x/(x**2 + y**2)+0*r2, 0*r2]\n fx = f1*rh[0] + f2*th[0] + f3*ph[0]\n fy = f1*rh[1] + f2*th[1] + f3*ph[1]\n fz = f1*rh[2] + f2*th[2] + f3*ph[2]\n return fx, fy, fz", "def spinex_phi(infile, sequence):\n return np.loadtxt(infile, usecols=3, skiprows=1).reshape((1, -1, 1))", "def vectorize_params(g_BM, p_H):\n params = np.zeros((10,))\n params[0:4] = tf.transformations.quaternion_from_matrix(g_BM)\n params[4:7] = g_BM[0:3,3]\n params[7:10] = p_H[0:3]\n return params", "def k3(self) -> float:\n return 
self.distortion_coefficients[2]", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def get_z(theta, phi):\n return math.cos(phi)/math.tan(theta/2) + 1j*math.sin(phi)/math.tan(theta/2)", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def read_camera_params(h5_dataset):\n fx = h5_dataset[0]\n fy = h5_dataset[1]\n skew = h5_dataset[2]\n cx = h5_dataset[3]\n cy = h5_dataset[4]\n K = np.array([[fx, skew, cx],\n [0, fy, cy],\n [0, 0, 1]], dtype=np.float64)\n R = np.array([[h5_dataset[5], h5_dataset[8], h5_dataset[11]],\n [h5_dataset[6], h5_dataset[9], h5_dataset[12]],\n [h5_dataset[7], h5_dataset[10], h5_dataset[13]]], dtype=np.float64)\n t = np.array([h5_dataset[14], h5_dataset[15], h5_dataset[16]], dtype=np.float64)\n return K, R, t", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def parameters_ui(layout, params):\n\n r = layout.row()\n r.prop(params, \"rotation_axis\")\n\n if 'auto' not in params.rotation_axis.lower():\n r = layout.row()\n text = 
\"Auto align Foot\"\n r.prop(params, \"auto_align_extremity\", text=text)\n\n r = layout.row()\n r.prop(params, \"segments\")\n\n r = layout.row()\n r.prop(params, \"bbones\")\n\n bone_layers = bpy.context.active_pose_bone.bone.layers[:]\n\n for layer in ['fk', 'tweak']:\n r = layout.row()\n r.prop(params, layer + \"_extra_layers\")\n r.active = params.tweak_extra_layers\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(16, 24):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8, 16):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(24, 32):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def _object_kinematics_params(self):\n obj_length, obj_width = self._obj_dims\n # Initial object position w.r.t its center\n obj_coords = np.matmul( # (2, 5) array of x-y coords of five points\n np.array([ # rotational matrix\n [np.cos(self._theta_init), np.sin(self._theta_init)],\n [-np.sin(self._theta_init), np.cos(self._theta_init)]\n ]),\n 0.5 * np.array([ # relative postion matrix\n [0, obj_length, obj_length, -obj_length, -obj_length],\n [0, obj_width, -obj_width, -obj_width, obj_width]\n ])\n )\n feat_vec_desired = obj_coords * self._fz_ratio\n\n # Global initial object position\n obj_coords += np.array([[self._x_obj_0], [self._y_obj_0]])\n speed = np.array([\n [(self._x_obj_f - self._x_obj_0) / self._t_sim],\n [(self._y_obj_f - self._y_obj_0) / self._t_sim]\n ])\n rot_speed = (self._theta_final - self._theta_init) / self._t_sim\n return obj_coords, speed, rot_speed, feat_vec_desired", "def rotation3Dx(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)\n rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)\n \n return rmat", "def compute_pose(metadata_line):\n # TODO(kgenova) Implement\n elements = metadata_line.split(' ')\n assert len(elements) == 5\n fake_azi = float(elements[0])\n fake_ele = float(elements[1])\n fake_dist = float(elements[3])\n fake_azi = -fake_azi * math.pi / 180.0\n fake_ele = math.pi / 2.0 - fake_ele * math.pi / 180.0\n fake_dist = fake_dist * 1.75\n x = fake_dist * math.sin(fake_ele) * math.cos(fake_azi)\n y = fake_dist * math.cos(fake_ele)\n z = -fake_dist * math.sin(fake_ele) * math.sin(fake_azi)\n return np.array([x, y, z], dtype=np.float32)", "def R3(theta):\n\n DCM = np.array([[np.cos(theta), np.sin(theta), 0], \n [-np.sin(theta), np.cos(theta), 0], \n [0, 0, 1]])\n\n return DCM", "def rotation3Dy(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = np.cos(theta), 0.0, -np.sin(theta)\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, 1.0, 0.0\n rmat[2,0], rmat[2,1], rmat[2,2] = np.sin(theta), 0.0, np.cos(theta)\n\n return rmat", "def 
read_parameters():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n param1 = hdulist1[1].data['e1'][:sample]\n param2 = hdulist1[1].data['e2'][:sample]\n weights = hdulist1[1].data['weight'][:sample]\n return param1, param2, weights", "def getU3(state):\n\n # TODO : do the verifictions of the above constraints\n\n theta=2*math.acos(state[0])\n phi=0\n lam=0\n\n if state[1]<0: theta = -theta\n\n # U3=np.array([\n # [math.cos(theta/2) , np.exp(-1j*lam)*math.sin(theta/2)], \n # [np.exp(1j*phi)*math.sin(theta/2), np.exp(1j*(phi+lam))*math.cos(theta/2)]\n # ])\n # print (U3)\n # print ([email protected]([[1],[0]]))\n\n return [theta, phi, lam]", "def common_line_in3D(phiA,thetaA,phiB,thetaB):\n\n\tfrom math import pi, sqrt, cos, sin, asin, atan2\n\n\tpiOver=pi/180.0;\n\tph1 = phiA*piOver; \n\tth1 = thetaA*piOver; \n\tph2 = phiB*piOver; \n\tth2 = thetaB*piOver;\n\t\n \t#nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;\n\t#ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;\n\t#nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);\n\n\n\tnx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)\n\tny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)\n\tnz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)\n\n\tnorm = nx*nx + ny*ny + nz*nz\n \n\tif norm < 1e-5:\n\t\t#print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB\n\t\treturn 0.0, 0.0\n\n\tif nz<0: nx=-nx; ny=-ny; nz=-nz;\n\n\t#thetaCom = asin(nz/sqrt(norm))\n\tphiCom = asin(nz/sqrt(norm))\n\t#phiCom = atan2(ny,nx)\n\tthetaCom = atan2(ny, nx)\n\t\n\treturn phiCom*180.0/pi , thetaCom*180.0/pi", "def mass_3d(r, Rs, rho0, gamma_inner, gamma_outer):\n Rs = float(Rs)\n const = 4 * np.pi * r ** 3 * rho0 * (Rs/r) ** gamma_inner\n m_3d = const/(3-gamma_inner) * hyp2f1((3-gamma_inner)/2,\n (gamma_outer-gamma_inner)/2,\n (5-gamma_inner)/2, -(r/Rs)**2)\n return m_3d", "def inverse_transform3(phi, theta=0.0, psi=0.0, tx=0.0, ty=0.0, tz=0.0, mirror = 0, scale=1.0):\n\n\td = Transform({'type': 'spider', 'phi': phi, 'theta': theta, 'psi': psi, 'tx': tx, 'ty': ty, 'tz': tz, \"mirror\":mirror,\"scale\":scale})\n\td = d.inverse()\n\td = d.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def homogeneous_Z( zTheta , pos ):\r\n return np.vstack( ( np.hstack( ( z_rot( zTheta ) , [ [ pos[0] ] , [ pos[1] ] , [ pos[2] ] ] ) ) ,\r\n np.hstack( ( [ 0 , 0 , 0 ] , [ 1 ] ) ) ) )", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def sat_3d_position(sat_2d_position):\n return np.dot(transformation_parameter, xi_eta(sat_2d_position))", "def computeOrientation3D(object, P):\n\n # compute rotational matrix around yaw axis\n R = [[np.cos(object.ry), 0, np.sin(object.ry)],\n [0, 1, 0],\n [-np.sin(object.ry), 0, np.cos(object.ry)]]\n\n # orientation in object coordinate system\n orientation_3D = [[0.0, object.l],\n [0.0, 0.0],\n [0.0, 0.0]]\n\n # rotate and translate in camera coordinate system, project in image\n 
orientation_3D = R * orientation_3D\n orientation_3D[0, :] += object.t[0]\n orientation_3D[1, :] += object.t[1]\n orientation_3D[2, :] += object.t[2]\n\n # vector behind image plane?\n if any(orientation_3D[2, :] < 0.1):\n orientation_2D = []\n else:\n # project orientation into the image plane\n orientation_2D = projectToImage(orientation_3D, P)\n return orientation_2D", "def parameters(self):\n return {\n 'base':self.base.parameters(),\n 'material':[m.parameters() for m in self.material],\n 'fraction':self.fraction,\n }", "def setCameraRotation3D(ang):\n dislin.vup3d(ang)", "def htm4(joint_rotations):\n # H0_3\n h0_3 = htm0_3(joint_rotations)\n\n # H3_4\n r3_4 = rot_z(joint_rotations[3])\n x3_4 = a4 * np.cos(np.radians(joint_rotations[3]))\n y3_4 = a4 * np.sin(np.radians(joint_rotations[3]))\n z3_4 = 0\n d3_4 = transl(x3_4, y3_4, z3_4)\n h3_4 = htm(r3_4, d3_4)\n h0_4 = np.dot(h0_3, h3_4)\n return h0_4", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def unpack_refinement_params(params):\n intrinsics = params[:10]\n\n # Unpack intrinsics\n alpha, beta, gamma, u_c, v_c, k1, k2, k3 ,p1, p2 = intrinsics\n K = np.array([[alpha, gamma, u_c],\n [ 0., beta, v_c],\n [ 0., 0., 1.]])\n k = np.array([k1, k2, k3, p1, p2])\n\n # Unpack 
extrinsics\n extrinsic_matrices = []\n for i in range(10, len(params), 6):\n E_rodrigues = params[i:i+6]\n rho_x, rho_y, rho_z, t_x, t_y, t_z = E_rodrigues\n R = cv2.Rodrigues(np.array([rho_x, rho_y, rho_z]))[0]\n t = np.array([t_x, t_y, t_z])\n\n E = np.zeros((3, 4))\n E[:3, :3] = R\n E[:, 3] = t\n\n extrinsic_matrices.append(E)\n\n return K, k, extrinsic_matrices", "def __init__(self, nav_data, div=5, dx=0.0, dy=1.6757135024103853, dth=0.0, pc1=1/3, pc2=9/24, ph=0.2): \n self.div = div\n self.t = np.arange(self.div)/self.div\n self.L = nav_data.loc['Loa']\n self.H = nav_data.loc['B']\n self.T = nav_data.loc['T']\n self.dx = dx \n self.dy = dy\n self.dth = dth\n self.c1 = self.L*pc1\n self.c2 = self.L*pc2\n self.h2 = self.H*ph\n self.Rz = rotation.matrix([0,0,1],dth) \n\n self.LWT = 0.50*nav_data.loc['Delta_m'] \n self.Md = nav_data.loc['Delta_m'] #self.LWT + self.DWT\n self.D = nav_data.loc['De']\n self.G = self.T - self.D \n self.z = nav_data.loc['KG'] - self.D", "def heading(yaw):\n q = euler2quat(0.0, 0.0, yaw)\n quat = Quaternion()\n quat.w = q[0]\n quat.x = q[1]\n quat.y = q[2]\n quat.z = q[3]\n return quat", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def __call__( self , theta ):\r\n offset = np.dot( z_rot( theta ) , [ self.radius , 0 , 0 ] )\r\n # print \"Offset:\" , offset\r\n return np.add( self.center , offset )", "def getTranslationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in separate array - easier for optimization\n nprojs = len(TiltSeries_._ProjectionList._list)\n self._alignmentTransX = nprojs * [0.]\n self._alignmentTransY = nprojs * [0.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentTransX[kk] = proj.getAlignmentTransX()\n self._alignmentTransY[kk] = proj.getAlignmentTransY()\n return self._alignmentTransX, self._alignmentTransY", "def get_x_y_z(drone, p, q, r):\n num_cameras = 2\n camera_constants = [0,math.pi/2]\n rads = np.zeros(num_cameras)\n phis = np.zeros(num_cameras)\n d = np.zeros(num_cameras)\n theta = np.zeros(num_cameras)\n Hs = np.zeros(num_cameras)\n s = 12\n HFOV = math.pi/4\n VFOV = 5*math.pi/36\n HPIX = 1280\n VPIX = 720\n #loop one, where we increment over camera number, and\n # get new information\n\n cent = calculate_centroid(p,q,r)\n for camera_num in range(num_cameras):\n\n A,B = find_a_and_b(p[camera_num],q[camera_num],r[camera_num],cent[camera_num])\n a = find_a(A,B)\n d_in = find_inner_d(a, s)\n angle_c = find_angle_c(a)\n alpha = find_alpha(HFOV, HPIX, A)\n w = find_w(angle_c, s)\n d_out = find_outer_d(w,alpha,a)\n pointy_front = is_point_front(r[camera_num],q[camera_num],p[camera_num],cent[camera_num])\n d[camera_num] = find_d(d_in,d_out,pointy_front)\n theta[camera_num] = find_theta(angle_c,A,B,camera_constants[camera_num])\n k = find_k(drone[camera_num], cent[camera_num])\n angle_k = find_angle_k(k, HFOV, HPIX)\n phi = find_phi(theta[camera_num], angle_k)\n rad = find_r(d[camera_num], angle_k)\n phis[camera_num] = phi\n rads[camera_num] = rad\n\n # end of first loop\n\n cosphis = np.cos(phis)\n sinphis = np.sin(phis)\n big_matrix = np.column_stack((cosphis,sinphis))\n points = np.zeros((int(num_cameras*(num_cameras-1)/2),2))\n i = 0\n for pair in itertools.combinations(range(num_cameras), 2):\n matrix_a = np.vstack((big_matrix[pair[0]],big_matrix[pair[1]]))\n vec_b = np.hstack((rads[pair[0]],rads[pair[1]]))\n point = np.linalg.solve(matrix_a, vec_b)\n points[i] = 
point\n i += 1\n drone_pos = np.mean(points,axis=0)\n\n # start of third loop\n for camera_num in range(num_cameras):\n d_prime = find_d_prime(d[camera_num], theta[camera_num], drone_pos)\n P,Q,M,N = find_P_Q_M_N(p[camera_num],q[camera_num],r[camera_num])\n h = find_h(d[camera_num],P,Q,M,N)\n angle_4 = find_angle_4(h,d[camera_num])\n Y = find_Y(drone[camera_num], cent[camera_num])\n angle_5 = find_angle_5(Y, VFOV, VPIX)\n angle_6 = angle_5 - angle_4\n h_prime = find_h_prime(d_prime, angle_6)\n Hs[camera_num] = h + h_prime\n drone_h = np.mean(H)\n return np.append(drone_pos,drone_h)", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def get_pars(self, idx):\n return np.hstack([self.pars[:4], self.pars[4+idx]])", "def getHeading(q):\n yaw = math.atan2(2 * (q.x * q.y + q.w * q.z),\n q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z)\n return yaw", "def _detectors3(self, hdr):\n # Called AnalysisParam in OpenMIMS, only last part\n d = {}\n d['TIC'] = self._electron_multiplier(hdr)\n\n for n in range(1, 8):\n det = 'Detector {}'.format(n)\n d[det] = {}\n d[det]['fc background setup positive'], \\\n d[det]['fc background setup negative'] = \\\n unpack(self._bo + '2i', hdr.read(8))\n\n for n in range(1, 8):\n det = 'Detector {}'.format(n)\n det_type = unpack(self._bo + 'i', hdr.read(4))[0]\n d[det]['detector'] = _detectors.get(det_type, str(det_type))\n return d", "def test_get_zmat_param_value(self):\n xyz = \"\"\"O -1.56972190 0.86781163 -0.31741697\n C -1.04766895 -0.34174340 0.22855902\n C 0.39334154 -0.53829388 -0.25140232\n N 1.18758060 0.66395027 0.03678474\n H -0.79633969 1.46799803 -0.36543061\n H -1.09144929 -0.26944950 1.32014286\n H -1.68279585 -1.17219329 -0.09269721\n H 0.84136357 -1.41603914 0.22426854\n H 0.39770524 -0.69095962 -1.33583221\n H 2.13746062 0.52142605 -0.30877212\n H 1.27433093 0.77543934 1.04802967\"\"\"\n spc1 = ARCSpecies(label='spc1', xyz=xyz)\n xyz_dict = spc1.get_xyz()\n value1 = 
converter.get_zmat_param_value(coords=xyz_dict, indices=[1, 2], mol=spc1.mol) # R\n value2 = converter.get_zmat_param_value(coords=xyz_dict, indices=[0, 1, 2], mol=spc1.mol) # A\n value3 = converter.get_zmat_param_value(coords=xyz_dict, indices=[1, 2, 3, 10], mol=spc1.mol) # D\n self.assertAlmostEqual(value1, 1.53150455)\n self.assertAlmostEqual(value2, 109.470340)\n self.assertAlmostEqual(value3, 66.2600849)", "def header_fields():\n fields = {}\n fields['PACKET'] = nacc.uds3.Field(name='PACKET', typename='Char', position=(1, 2), length=2, inclusive_range=None, allowable_values=[], blanks=[])\n fields['FORMID'] = nacc.uds3.Field(name='FORMID', typename='Char', position=(4, 6), length=3, inclusive_range=None, allowable_values=[], blanks=[])\n fields['FORMVER'] = nacc.uds3.Field(name='FORMVER', typename='Num', position=(8, 10), length=3, inclusive_range=(1, 3), allowable_values=['3.1'], blanks=[])\n fields['ADCID'] = nacc.uds3.Field(name='ADCID', typename='Num', position=(12, 13), length=2, inclusive_range=(2, 99), allowable_values=[], blanks=[])\n fields['PTID'] = nacc.uds3.Field(name='PTID', typename='Char', position=(15, 24), length=10, inclusive_range=None, allowable_values=[], blanks=[])\n fields['VISITMO'] = nacc.uds3.Field(name='VISITMO', typename='Num', position=(26, 27), length=2, inclusive_range=(1, 12), allowable_values=[], blanks=[])\n fields['VISITDAY'] = nacc.uds3.Field(name='VISITDAY', typename='Num', position=(29, 30), length=2, inclusive_range=(1, 31), allowable_values=[], blanks=[])\n fields['VISITYR'] = nacc.uds3.Field(name='VISITYR', typename='Num', position=(32, 35), length=4, inclusive_range=(2005, CURRENT_YEAR), allowable_values=[], blanks=[])\n fields['VISITNUM'] = nacc.uds3.Field(name='VISITNUM', typename='Char', position=(37, 39), length=3, inclusive_range=None, allowable_values=[], blanks=[])\n fields['INITIALS'] = nacc.uds3.Field(name='INITIALS', typename='Char', position=(41, 43), length=3, inclusive_range=None, allowable_values=[], blanks=[])\n return fields", "def get_pv_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n pv1 = np.zeros((40,), dtype=np.float64)\n pv2 = np.zeros((40,), dtype=np.float64)\n for k in range(40):\n pv1[k] = header.get('PV1_%d' % k, 0.0)\n pv2[k] = header.get('PV2_%d' % k, 0.0)\n return cd, pv1, pv2", "def get_object_params(self):\n return self.mass, self.x, self.y", "def prodvect3d(self, direction: Direction3D) -> dict:\n\n return {\"vect3D\": self.v3ddict.prodvect3d(direction.v3ddict).getDict()}", "def lattice_settings(self):\n return (self.a, self.b, self.c,\n self.alpha*radians, self.beta*radians, self.gamma*radians)", "def reformat_pose_to_dict(self, now_pose):\n # now_pose è un dict in particolare { pose : [ {position : [{x : value , y:value , z:value} ] } , {orientation : [] } }\n # devo convertire i quaternioni in amgoli di eulero...estrarre i quaternioni da pose_now e convertirli in angoli RPY\n\n lato_corto_2 = 1.65 #1.45 # offset parcheggio\n \n #correggo gli offset x centrare le macchine nei parcheggi\n\n if abs(round(now_pose.position.x,2)) == 22.45:\n if now_pose.position.x < 0 :\n now_pose.position.x+=lato_corto_2\n now_pose.position.y-=0.4\n else :\n now_pose.position.x-=lato_corto_2\n now_pose.position.y+=0.4\n \n if abs(round(now_pose.position.y,2)) == 22.45:\n if now_pose.position.y < 0 :\n now_pose.position.y+=lato_corto_2\n now_pose.position.x+=0.4\n else :\n now_pose.position.y-=lato_corto_2\n 
now_pose.position.x-=0.4\n\n # correggo la z per renderla uguale all'asfalto che viene spownata nel mondo\n\n offset_asfalto = 0.3\n\n x = now_pose.position.x\n y = now_pose.position.y\n z = now_pose.position.z + offset_asfalto\n\n q1 = now_pose.orientation.x\n q2 = now_pose.orientation.y\n q3 = now_pose.orientation.z\n q4 = now_pose.orientation.w\n\n\n # converto i quaternioni in angoli di rulero RPY in radianti\n orientation_list = [q1,q2,q3,q4]\n\n euler = euler_from_quaternion( orientation_list )\n roll = euler[0]\n pitch = euler[1]\n yaw = round(euler[2],2) + np.pi\n\n\n # creo la lista dei parametri che mi servono nel campo pose:[] del file .yaml\n\n lista_parametri = [x ,y ,z ,roll ,pitch ,yaw ]\n\n # creo un dict con tutti i campi di cui ho bisogno nel file .yaml\n # settare le chiavi 'name' , ' type ' , 'package' , ' pose ' secondo le proprie necessità\n # i due stili sono equivalenti : usare quello preferito\n \"\"\"\n {\"name\" : \"park1\" , \n \"type\" : \"sdf\" , \n \"package\" : \"object_spawner\" , \n \"pose \":self.seq(lista_parametri) \n }\n \n \"\"\"\n lista_veicoli = ['macchina','pickup','ferrari','prius_hybrid','car_lexus','car_polo','car_volvo','car_golf']\n num_veicoli = 1\n\n #modificare qui implementando una funzione randomica se si vogliono piu veicoli casuali spawnati\n elemento_lista = {'name' : lista_veicoli[3],\n 'type': 'sdf',\n 'package': 'object_spawner',\n 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n #\"\"\"\n # elemento_lista = {'name' : 'ferrari',\n # 'type': 'urdf',\n # 'package': 'autopark',\n # 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n\n return elemento_lista", "def compute_heading_from_quaternion(r):\n # quaternion - np.quaternion unit quaternion\n # Real world rotation\n direction_vector = np.array([0, 0, -1]) # Forward vector\n heading_vector = quaternion_rotate_vector(r.inverse(), direction_vector)\n\n phi = -np.arctan2(heading_vector[0], -heading_vector[2]).item()\n return phi", "def motions(self) -> Dict[str, float]:\n return {\n \"yaw\": self.yaw,\n \"pitch\": self.pitch,\n \"roll\": self.roll,\n \"sway\": self.sway,\n \"surge\": self.surge,\n \"heave\": self.heave\n }", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def sph2car(r, theta, phi):\n x = r * np.sin(theta) * np.cos(phi)\n y = r * np.sin(theta) * np.sin(phi)\n z = r * np.cos(theta)\n\n return x, y, z", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def pt3_reader(filename):\n with open(filename, 'rb') as f:\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Binary file header\n header_dtype = np.dtype([\n ('Ident', 'S16' ),\n ('FormatVersion', 'S6' ),\n ('CreatorName', 'S18' ),\n ('CreatorVersion', 'S12' ),\n ('FileTime', 'S18' ),\n ('CRLF', 'S2' ),\n ('Comment', 'S256' ),\n ('NumberOfCurves', 'int32' ),\n ('BitsPerRecord', 'int32' ), # bits in each T3 record\n ('RoutingChannels', 'int32' ),\n ('NumberOfBoards', 'int32' ),\n ('ActiveCurve', 'int32' ),\n ('MeasurementMode', 'int32' ),\n ('SubMode', 'int32' ),\n ('RangeNo', 'int32' ),\n ('Offset', 'int32' ),\n ('AcquisitionTime', 'int32' ), # in ms\n ('StopAt', 'uint32'),\n ('StopOnOvfl', 'int32' ),\n ('Restart', 'int32' ),\n ('DispLinLog', 'int32' ),\n ('DispTimeAxisFrom', 'int32' ),\n ('DispTimeAxisTo', 'int32' ),\n ('DispCountAxisFrom', 'int32' ),\n ('DispCountAxisTo', 
'int32' ),\n ])\n header = np.fromfile(f, dtype=header_dtype, count=1)\n\n if header['FormatVersion'][0] != b'2.0':\n raise IOError((\"Format '%s' not supported. \"\n \"Only valid format is '2.0'.\") % \\\n header['FormatVersion'][0])\n\n dispcurve_dtype = np.dtype([\n ('DispCurveMapTo', 'int32'),\n ('DispCurveShow', 'int32')])\n dispcurve = np.fromfile(f, dispcurve_dtype, count=8)\n\n params_dtype = np.dtype([\n ('ParamStart', 'f4'),\n ('ParamStep', 'f4'),\n ('ParamEnd', 'f4')])\n params = np.fromfile(f, params_dtype, count=3)\n\n repeat_dtype = np.dtype([\n ('RepeatMode', 'int32'),\n ('RepeatsPerCurve', 'int32'),\n ('RepeatTime', 'int32'),\n ('RepeatWaitTime', 'int32'),\n ('ScriptName', 'S20' )])\n repeatgroup = np.fromfile(f, repeat_dtype, count=1)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Hardware information header\n hw_dtype = np.dtype([\n ('HardwareIdent', 'S16' ),\n ('HardwarePartNo', 'S8' ),\n ('HardwareSerial', 'int32'),\n ('SyncDivider', 'int32'),\n ('CFDZeroCross0', 'int32'),\n ('CFDLevel0', 'int32'),\n ('CFDZeroCross1', 'int32'),\n ('CFDLevel1', 'int32'),\n ('Resolution', 'f4'),\n ('RouterModelCode', 'int32'),\n ('RouterEnabled', 'int32')])\n hardware = np.fromfile(f, hw_dtype, count=1)\n\n rtr_dtype = np.dtype([\n ('InputType', 'int32'),\n ('InputLevel', 'int32'),\n ('InputEdge', 'int32'),\n ('CFDPresent', 'int32'),\n ('CFDLevel', 'int32'),\n ('CFDZCross', 'int32')])\n router = np.fromfile(f, rtr_dtype, count=4)\n\n # Time tagging mode specific header\n ttmode_dtype = np.dtype([\n ('ExtDevices', 'int32' ),\n ('Reserved1', 'int32' ),\n ('Reserved2', 'int32' ),\n ('InpRate0', 'int32' ),\n ('InpRate1', 'int32' ),\n ('StopAfter', 'int32' ),\n ('StopReason', 'int32' ),\n ('nRecords', 'int32' ),\n ('ImgHdrSize', 'int32')])\n ttmode = np.fromfile(f, ttmode_dtype, count=1)\n\n # Special header for imaging. How many of the following ImgHdr\n # array elements are actually present in the file is indicated by\n # ImgHdrSize above.\n ImgHdr = np.fromfile(f, dtype='int32', count=ttmode['ImgHdrSize'][0])\n\n # The remainings are all T3 records\n t3records = np.fromfile(f, dtype='uint32', count=ttmode['nRecords'][0])\n\n timestamps_unit = 1./ttmode['InpRate0']\n nanotimes_unit = 1e-9*hardware['Resolution']\n\n metadata = dict(header=header, dispcurve=dispcurve, params=params,\n repeatgroup=repeatgroup, hardware=hardware,\n router=router, ttmode=ttmode, imghdr=ImgHdr)\n return t3records, timestamps_unit, nanotimes_unit, metadata", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. 
/ 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n return np.matrix(R)", "def get_packed(self, use_sqrt=None):\n\n if (use_sqrt is None) or (use_sqrt == self.use_sqrt):\n return self._params\n\n pa = self._params.copy()\n cov_re = self.get_cov_re()\n\n if use_sqrt:\n L = np.linalg.cholesky(cov_re)\n pa[self.k_fe:] = L[self._ix]\n else:\n pa[self.k_fe:] = cov_re[self._ix]\n\n return pa", "def compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):\n\n\tR1 = Transform({\"type\":\"spider\",\"phi\":float(phi1),\"theta\":float(theta1),\"psi\":float(psi1),\"tx\":float(sx1),\"ty\":float(sy1),\"tz\":float(sz1),\"mirror\":0,\"scale\":float(scale1)})\n\tR2 = Transform({\"type\":\"spider\",\"phi\":float(phi2),\"theta\":float(theta2),\"psi\":float(psi2),\"tx\":float(sx2),\"ty\":float(sy2),\"tz\":float(sz2),\"mirror\":0,\"scale\":float(scale2)})\n\tRcomp=R2*R1\n\td = Rcomp.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"scale\"]", "def _create_quaternion(direction, up) -> Tuple[float, float, float, float]:\n direction = direction / spy.vnorm(direction)\n up = up / spy.vnorm(up)\n\n x = spy.vcrss(up, direction)\n x = x / spy.vnorm(x)\n y = spy.vcrss(direction, x)\n y = y / spy.vnorm(y)\n z = direction\n\n r = sqrt(1.0 + x[0] + y[1] + z[2]) * 0.5\n i = (y[2] - z[1]) / (4 * r)\n j = (z[0] - x[2]) / (4 * r)\n k = (x[1] - y[0]) / (4 * r)\n\n return r, i, j, k", "def info(self):\n tline = \"\"\n for (ii, projection) in enumerate(self._ProjectionList):\n tiltAngle = projection._tiltAngle\n transX = -projection._alignmentTransX\n transY = -projection._alignmentTransY\n rot = -(projection._alignmentRotation + 90.)\n mag = projection._alignmentMagnification\n tline = tline + (\"%3d: \" % ii)\n tline = tline + (\"%15s; \" % projection._filename)\n tline = tline + (\"tiltAngle=%9.3f; \" % tiltAngle)\n tline = tline + (\"transX=%9.3f; \" % transX)\n tline = tline + (\"transY=%9.3f; \" % transY)\n tline = tline + (\"rot=%9.3f; \" % rot)\n tline = tline + (\"mag=%9.3f\\n\" % mag)\n print(tline)", "def predict_qth(self):\n return (self.latitude, -self.longitude, self.altitude)" ]
[ "0.6369768", "0.63076174", "0.57975394", "0.5729281", "0.5533436", "0.551851", "0.5500369", "0.5439547", "0.5422399", "0.54033196", "0.5388639", "0.5361121", "0.53491545", "0.53419423", "0.531873", "0.53108954", "0.53095436", "0.53085315", "0.5303664", "0.5303664", "0.5302264", "0.5296609", "0.52865446", "0.52786803", "0.52607256", "0.5229555", "0.5214137", "0.52101487", "0.51915", "0.5185534", "0.518414", "0.5165979", "0.5156577", "0.51527846", "0.5146003", "0.51442945", "0.5134576", "0.5131072", "0.5129786", "0.51275796", "0.5122988", "0.5112966", "0.5108733", "0.5089468", "0.508251", "0.50711954", "0.5051154", "0.50446564", "0.5044415", "0.5032898", "0.5031905", "0.5028995", "0.5021214", "0.5020079", "0.50082093", "0.5000527", "0.49968827", "0.49931118", "0.49894583", "0.4987589", "0.49777326", "0.49609298", "0.4951972", "0.4951259", "0.49468526", "0.49392393", "0.49372277", "0.49326003", "0.49290374", "0.49288535", "0.49280724", "0.49230292", "0.49178606", "0.49075556", "0.4907508", "0.4907508", "0.49033114", "0.4899233", "0.48989552", "0.4898425", "0.48846504", "0.48798487", "0.48764366", "0.48741853", "0.48724195", "0.48638257", "0.48576346", "0.4855671", "0.48423037", "0.48419914", "0.48413435", "0.48397115", "0.48372066", "0.48353264", "0.48327672", "0.48294368", "0.48248768", "0.48238757", "0.48187616", "0.48144493" ]
0.7157893
0
set 3D alignment parameters in the header phi theta psi tx ty tz mirror scale
def set_params3D(ima, p, xform = "xform.align3d"):
	t = Transform({"type":"spider","phi":p[0],"theta":p[1],"psi":p[2],"tx":p[3],"ty":p[4],"tz":p[5],"mirror":p[6],"scale":p[7]})
	ima.set_attr(xform, t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def setCameraRotation3D(ang):\n dislin.vup3d(ang)", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def set_physical_params(self, params):\n self.M500 = params[0]\n self.r500 = params[1]\n self.z = params[2]", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def parameters_ui(layout, params):\n\n r = layout.row()\n r.prop(params, \"rotation_axis\")\n\n if 'auto' not in params.rotation_axis.lower():\n r = layout.row()\n text = \"Auto align Foot\"\n r.prop(params, \"auto_align_extremity\", text=text)\n\n r = layout.row()\n r.prop(params, \"segments\")\n\n r = layout.row()\n r.prop(params, \"bbones\")\n\n bone_layers = bpy.context.active_pose_bone.bone.layers[:]\n\n for layer in ['fk', 'tweak']:\n r = layout.row()\n r.prop(params, layer + \"_extra_layers\")\n r.active = params.tweak_extra_layers\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(16, 24):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8, 16):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(24, 32):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })", "def set_homog_trans_mtx(x: float, y: float, z: float, mtx: numpy.ndarray):\n mtx[0][3] = x\n mtx[1][3] = y\n mtx[2][3] = z", "def setOptimizableVariables(self, TiltAlignmentParameters_, 
optimizableVariables):\n ntilt = self._ntilt\n nmark = len(self._Markers)\n\n nopti = (nmark - 1) * 3\n\n if self.optimizeMarkerPositions:\n # translation\n nopti += (ntilt) * 2\n\n # variable magnifications for projections, exclude scaling of reference image (S==1)\n if TiltAlignmentParameters_.dmag:\n nopti += ntilt - 1\n\n #variable rotation for projections\n if TiltAlignmentParameters_.drot:\n nopti += ntilt\n else:\n nopti += 1\n\n # beam tilt\n if TiltAlignmentParameters_.dbeam:\n nopti += 1\n\n # nopti += ntilt\n ## gradient on image rotation and magnification in projections\n #if TiltAlignmentParameters_.dGradRotMag:\n # nopti = nopti + 2\n\n # check that number of variables is ok\n if len(optimizableVariables) != nopti:\n print(\"Length optimizableVariables: \" + str(len(optimizableVariables)))\n print(\"N optmization: \" + str(nopti))\n raise IndexError('length of optimizableVariables does not match TiltAlignmentParameters')\n\n # marker 3D coords\n ivar = 0\n\n\n for (imark, Marker) in enumerate(self._Markers):\n # reference marker irefmark is fixed to standard value\n if ((imark ) != TiltAlignmentParameters_.irefmark):\n r = numpy.array([optimizableVariables[ivar],\n optimizableVariables[ivar + 1], optimizableVariables[ivar + 2]])\n self._Markers[imark].set_r(r)\n\n ivar = ivar + 3\n\n\n if self.optimizeMarkerPositions:\n # translations\n for itilt in range(0, ntilt):\n # translation in reference projection is zero\n #FFif (self._projIndices[itilt] != TiltAlignmentParameters_.ireftilt):\n self._alignmentTransX[itilt] = optimizableVariables[ivar]\n self._alignmentTransY[itilt] = optimizableVariables[ivar + 1]\n ivar = ivar + 2\n\n\n\n # magnification changes\n if TiltAlignmentParameters_.dmag:\n for itilt in range(0, ntilt):\n # magnification of reference projection is 1.\n if (int(self._projIndices[itilt]) != int(self._projIndices[self.ireftilt])):\n self._alignmentMagnifications[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # image rotations\n if TiltAlignmentParameters_.drot:\n for itilt in range(0, ntilt):\n self._alignmentRotations[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n # all rotations are the same - take the first one\n else:\n self._alignmentRotations[0] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n\n\n # beam inclination\n if TiltAlignmentParameters_.dbeam:\n self._alignmentBeamTilt = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # focus gradient (TODO)\n #if TiltAlignmentParameters_.dGradRotMag:\n # optimizableVariables[ivar] = self._alignmentMagnFoc\n # optimizableVariables[ivar+1] = self._alignmentRotFoc\n\n\n if not self.optimizeMarkerPositions:\n from pytom.scripts.Rotation_function import calculate_translation\n\n\n # r_model is the modelled x,y,z coordinate of the reference marker\n r_model = self._Markers[self.irefmark].get_r()\n\n # if using a reduced set using an indices existing in the reduced set\n # i = int(numpy.argwhere(self.TiltSeries_._projIndices.astype(int) == self.TiltSeries_._TiltAlignmentParas.ireftilt)[0][0])\n psi_ref = numpy.deg2rad(numpy.mean(self._alignmentRotations) + 90)\n\n for iproj in range(0,ntilt):\n # setting variables\n marker = self._Markers[self.irefmark]\n r_exp_tilt = numpy.array([marker.get_xProj(iproj), marker.get_yProj(iproj)]) - numpy.array(\n self.TiltSeries_._TiltAlignmentParas.cent)\n psi_itilt = numpy.deg2rad(self._alignmentRotations[iproj] + 90)\n theta_itilt = numpy.deg2rad(self._tiltAngles[iproj])\n magnification =self._alignmentMagnifications[iproj]\n\n # calculating translation 
setting difference model and experimental reference marker point at 0\n tx, ty = calculate_translation(r_model, r_exp_tilt, psi_ref, psi_itilt, theta_itilt, magnification)\n\n\n self._alignmentTransX[iproj] = tx\n self._alignmentTransY[iproj] = ty\n\n\n\n # print(self.irefmark, self._alignmentTransX[self.ireftilt], self._alignmentTransY[self.ireftilt])\n # for itilt in range(ntilt):\n # self.q[itilt] = optimizableVariables[ivar]\n # ivar += 1", "def setViewAngle3D(ang):\n dislin.vang3d(ang)", "def set_phi(self):\n self.phi = float(dihedral(self.O5.getXYZ(), self.C1.getXYZ(), self.GO.getXYZ(), self.CX.getXYZ()))", "def __init__(self, nav_data, div=5, dx=0.0, dy=1.6757135024103853, dth=0.0, pc1=1/3, pc2=9/24, ph=0.2): \n self.div = div\n self.t = np.arange(self.div)/self.div\n self.L = nav_data.loc['Loa']\n self.H = nav_data.loc['B']\n self.T = nav_data.loc['T']\n self.dx = dx \n self.dy = dy\n self.dth = dth\n self.c1 = self.L*pc1\n self.c2 = self.L*pc2\n self.h2 = self.H*ph\n self.Rz = rotation.matrix([0,0,1],dth) \n\n self.LWT = 0.50*nav_data.loc['Delta_m'] \n self.Md = nav_data.loc['Delta_m'] #self.LWT + self.DWT\n self.D = nav_data.loc['De']\n self.G = self.T - self.D \n self.z = nav_data.loc['KG'] - self.D", "def set_trans(self, head_mri_trans):\n x, y, z = -self.mri_origin[0]\n mri_tgt_trans = translation(x, y, z)\n head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)\n\n x, y, z = self.hsp.nasion[0]\n src_hsp_trans = translation(x, y, z)\n src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)\n\n rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])\n x, y, z = src_tgt_trans[:3, 3]\n\n self.rot_x = rot_x\n self.rot_y = rot_y\n self.rot_z = rot_z\n self.trans_x = x\n self.trans_y = y\n self.trans_z = z", "def set_MRI_orientation(self):\n\n if self.has_axes(MRI3Daxes):\n orientation = MRI3Daxes[:]\n if self.has_axis('time'):\n orientation += ['time']\n if self.has_axis('iteration'):\n orientation += ['iteration']\n if self.has_axis('condition'):\n orientation += ['condition']\n\n orientation += sorted(set(self.axes_names).difference(orientation))\n\n self.set_orientation(orientation)", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def set_position(self, x, y, z):\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n 
nrn.pt3dchange(i, \\\n x-self.x+nrn.x3d(i), \\\n y-self.y+nrn.y3d(i), \\\n z-self.z+nrn.z3d(i), \\\n nrn.diam3d(i))\n self.x = x; self.y = y; self.z = z", "def setAxisLengths3D(x=2.,y=2.,z=2.):\n dislin.axis3d(x,y,z)", "def __init__(self, orientation = None, translation = None, panelgroup = None, homogenous_transformation = None, name = None):\n self.include_translation = True\n self.name = name\n\n if panelgroup is not None:\n d_mat = panelgroup.get_local_d_matrix()\n fast = matrix.col((d_mat[0],d_mat[3],d_mat[6])).normalize()\n slow = matrix.col((d_mat[1],d_mat[4],d_mat[7])).normalize()\n orig = matrix.col((d_mat[2],d_mat[5],d_mat[8]))\n\n v3 = fast.cross(slow).normalize()\n\n r3 = matrix.sqr((fast[0],slow[0],v3[0],\n fast[1],slow[1],v3[1],\n fast[2],slow[2],v3[2]))\n\n self.orientation = r3.r3_rotation_matrix_as_unit_quaternion()\n self.translation = orig\n\n if not self.name:\n self.name = panelgroup.get_name()\n\n elif orientation is not None or translation is not None:\n assert orientation is not None and translation is not None\n self.orientation = orientation\n self.translation = translation\n\n else:\n # Decompose the homegenous transformation assuming no scale factors were used\n h = homogenous_transformation\n self.orientation = matrix.sqr((h[0],h[1],h[2],\n h[4],h[5],h[6],\n h[8],h[9],h[10])).r3_rotation_matrix_as_unit_quaternion()\n self.translation = matrix.col((h[3],\n h[7],\n h[11]))\n assert h[12] == h[13] == h[14] == 0 and h[15] == 1", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [3./self.lengthscale**2, 2*np.sqrt(3)/self.lengthscale, 1.]\r\n self.b = [1,self.lengthscale**2/3]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] 
for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def __setup_parameters__(self):\r\n self.M=self.N+1\r\n self.u=1+self.pu\r\n self.d=1-self.pd\r\n self.qu=(math.exp((self.r-self.div)*self.dt)-self.d)/(self.u-self.d)\r\n self.qd=1-self.qu", "def set_phi(self,phi):\n\t\tr=self.r\n\t\tself.x = np.cos(np.deg2rad(phi))*r\n\t\tself.y = np.sin(np.deg2rad(phi))*r", "def __init__(self):\n Page.__init__(self, u\"Esfera, parametrización por proyecciones estereográficas\")\n\n r = .998\n esf = ParametricPlot3D(lambda t, f: (r * sin(t) * cos(f), r * sin(t) * sin(f), r * cos(t)), (0, pi, 70), (0, 2 * pi, 70))\n# esf.setAmbientColor(_1(99,136,63))\n esf.setDiffuseColor(_1(99, 136, 63))\n esf.setSpecularColor(_1(99, 136, 63))\n\n\n def proyZm1(u, v, t1):\n \"\"\"proy desde el polo norte al plano z=-1\"\"\"\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)\n\n def proyZ1(u, v, t2):\n \"\"\"proy desde el polo sur al plano z=1\"\"\"\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)\n\n stereo = ParametricPlot3D(proyZm1, (-3, 3, 70), (-3, 3, 70))\n stereo.setLinesVisible(True)\n stereo.setMeshVisible(False)\n stereo.setMeshDiffuseColor(_1(117, 55, 79))\n\n stereo2 = ParametricPlot3D(proyZ1, (-3, 3, 70), (-3, 3, 70))\n stereo2.setLinesVisible(True)\n stereo2.setMeshVisible(False)\n stereo2.setMeshDiffuseColor(_1(80, 87, 193))\n stereo2.setTransparency(0.5)\n stereo2.setTransparencyType(8)\n\n\n baseplane = BasePlane()\n baseplane.setHeight(-1.005)\n baseplane.setRange((-4, 4, 7))\n self.addChild(esf)\n self.addChild(stereo2)\n self.addChild(stereo)\n self.addChild(baseplane)\n\n params = [stereo,stereo2]\n\n ## no queremos los controles\n for i,p in enumerate(params):\n p.parameters['t%d' % (i+1)].hide()\n\n anims = [p.parameters['t%d' % (i+1)].asAnimation() for i,p in enumerate(params)]\n self.setupAnimations(anims)", "def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = np.dot(h0_2, h2_3)\n return h0_3", "def create_e3d_file(self,path='./'):\n dt=0.606*self.model_parameters['dh']/np.max(self.velocity_model['vp']) # dt needs to satify the courant condition\n t=int(self.model_parameters['duration']/dt)\n \n # Check path exists, if not create one\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Create e3d parameter file\n f=open('%s%s_e3dmodel.txt'%(path,self.model_name),'w')\n f.write(\"grid x=%s z=%s dh=%s b=2 q=1\\ntime dt=%0.5f t=%s\\n\"%(self.model_parameters['xmax'],self.model_parameters['zmax'],self.model_parameters['dh'],dt,t))\n f.write(\"block p=%s s=%s r=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][0],self.velocity_model['vs'][0],self.velocity_model['rho'][0]))\n \n for i in range(1,len(self.velocity_model['vp'])-1):\n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 
Qf=50\\n\"%(self.velocity_model['vp'][i],self.velocity_model['vs'][i],self.velocity_model['rho'][i],\n self.velocity_model['depth'][i],self.velocity_model['depth'][i+1]))\n \n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\\n\"%(self.velocity_model['vp'][i+1],self.velocity_model['vs'][i+1],self.velocity_model['rho'][i+1],\n self.velocity_model['depth'][i+1],self.model_parameters['zmax'])) # extend to the based of the model \n \n f.write(\"visual movie=5\\n\\n\")\n\n if self.source['src_type']!=4:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'])) \n else:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s Mxx=%s Myy=%s Mzz=%s Mxy=%s Mxz=%s Myz=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'],self.source['mt'][0],self.source['mt'][1],self.source['mt'][2],self.source['mt'][3],self.source['mt'][4],self.source['mt'][5])) \n\n for r in range(len(self.receivers['recxs'])):\n f.write('sac x=%0.3f z=%0.3f file=%s\\n'%(self.receivers['recxs'][r],self.receivers['reczs'][r],self.model_name))\n\n f.write(\"visual sample=0.1 movie=1 scale=10000000000/n\")\n f.close()\n \n print('File created: %s%s_e3dmodel.txt'%(path,self.model_name))", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def set_params(self, params):\n params = dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.ndimx = params.ndimx\n self.params.model_str = getattr(params, 'model_str', 'optfixedsig')\n self.params.ig1 = getattr(params, 'ig1', 4.0)\n self.params.ig2 = getattr(params, 'ig2', 3.0)\n self.params.n1 = getattr(params, 'n1', 1.0)\n self.params.n2 = getattr(params, 'n2', 1.0)\n self.params.sigma = getattr(params, 'sigma', 1e-5)\n self.params.niter = getattr(params, 'niter', 70)\n self.params.kernel = getattr(params, 'kernel', kern_matern)\n self.params.trans_x = getattr(params, 'trans_x', False)", "def _write_header(self, header):\n # write out telescope and source information\n header[\"latitude\"] = self.telescope_location_lat_lon_alt_degrees[0]\n header[\"longitude\"] = self.telescope_location_lat_lon_alt_degrees[1]\n header[\"altitude\"] = self.telescope_location_lat_lon_alt_degrees[2]\n header[\"telescope_name\"] = np.string_(self.telescope_name)\n header[\"instrument\"] = np.string_(self.instrument)\n header[\"object_name\"] = np.string_(self.object_name)\n\n # write out required UVParameters\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"Nfreqs\"] = self.Nfreqs\n header[\"Npols\"] = self.Npols\n header[\"Nspws\"] = self.Nspws\n header[\"Ntimes\"] = self.Ntimes\n header[\"antenna_numbers\"] = self.antenna_numbers\n header[\"uvw_array\"] = 
self.uvw_array\n header[\"vis_units\"] = np.string_(self.vis_units)\n header[\"channel_width\"] = self.channel_width\n header[\"time_array\"] = self.time_array\n header[\"freq_array\"] = self.freq_array\n header[\"integration_time\"] = self.integration_time\n header[\"lst_array\"] = self.lst_array\n header[\"polarization_array\"] = self.polarization_array\n header[\"spw_array\"] = self.spw_array\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"antenna_positions\"] = self.antenna_positions\n\n # handle antenna_names; works for lists or arrays\n header[\"antenna_names\"] = np.asarray(self.antenna_names, dtype=\"bytes\")\n\n # write out phasing information\n header[\"phase_type\"] = np.string_(self.phase_type)\n if self.phase_center_ra is not None:\n header[\"phase_center_ra\"] = self.phase_center_ra\n if self.phase_center_dec is not None:\n header[\"phase_center_dec\"] = self.phase_center_dec\n if self.phase_center_epoch is not None:\n header[\"phase_center_epoch\"] = self.phase_center_epoch\n if self.phase_center_frame is not None:\n header[\"phase_center_frame\"] = np.string_(self.phase_center_frame)\n\n # write out optional parameters\n if self.dut1 is not None:\n header[\"dut1\"] = self.dut1\n if self.earth_omega is not None:\n header[\"earth_omega\"] = self.earth_omega\n if self.gst0 is not None:\n header[\"gst0\"] = self.gst0\n if self.rdate is not None:\n header[\"rdate\"] = np.string_(self.rdate)\n if self.timesys is not None:\n header[\"timesys\"] = np.string_(self.timesys)\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n if self.blt_order is not None:\n header[\"blt_order\"] = np.string_(\", \".join(self.blt_order))\n if self.antenna_diameters is not None:\n header[\"antenna_diameters\"] = self.antenna_diameters\n if self.uvplane_reference_time is not None:\n header[\"uvplane_reference_time\"] = self.uvplane_reference_time\n if self.eq_coeffs is not None:\n header[\"eq_coeffs\"] = self.eq_coeffs\n if self.eq_coeffs_convention is not None:\n header[\"eq_coeffs_convention\"] = np.string_(self.eq_coeffs_convention)\n\n # write out extra keywords if it exists and has elements\n if self.extra_keywords:\n extra_keywords = header.create_group(\"extra_keywords\")\n for k in self.extra_keywords.keys():\n if isinstance(self.extra_keywords[k], str):\n extra_keywords[k] = np.string_(self.extra_keywords[k])\n else:\n extra_keywords[k] = self.extra_keywords[k]\n\n # write out history\n header[\"history\"] = np.string_(self.history)\n\n return", "def update_Element_Perturb_Params(self, shift_y, shift_z, rot_angle_y, rot_angle_z):\n raise NotImplementedError", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [1./self.lengthscale, 1.]\r\n self.b = [1]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] 
for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def define_lattice_parameters(self, lattice=True):\n if self.symmetry == 'cubic':\n self.parameters.add('a', self.a, vary=lattice)\n elif self.symmetry == 'tetragonal' or self.symmetry == 'hexagonal':\n self.parameters.add('a', self.a, vary=lattice)\n self.parameters.add('c', self.c, vary=lattice)\n elif self.symmetry == 'orthorhombic':\n self.parameters.add('a', self.a, vary=lattice)\n self.parameters.add('b', self.b, vary=lattice)\n self.parameters.add('c', self.c, vary=lattice)\n elif self.symmetry == 'monoclinic':\n self.parameters.add('a', self.a, vary=lattice)\n self.parameters.add('b', self.b, vary=lattice)\n self.parameters.add('c', self.c, vary=lattice)\n self.parameters.add('beta', self.beta, vary=lattice)\n else:\n self.parameters.add('a', self.a, vary=lattice)\n self.parameters.add('b', self.b, vary=lattice)\n self.parameters.add('c', self.c, vary=lattice)\n self.parameters.add('alpha', self.alpha, vary=lattice)\n self.parameters.add('beta', self.beta, vary=lattice)\n self.parameters.add('gamma', self.gamma, vary=lattice)", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def defineThetaParams(self,param_list,value_list):\n for param,value in zip(param_list,value_list):\n _p=param.split()\n if len(_p)==2:\n self.setMorphParam(_p,value,self.theta_params)\n elif len(_p)==3:\n self.setChannelParam(_p,value,self.theta_params)\n else:\n raise RuntimeError", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def write_settings(self, settings_file):\n lines = []\n lines.append(f'parameters.pixelSize = {self.pixel_size};')\n lines.append(f'parameters.wavelength = {self.wavelength};')\n lines.append(f'parameters.distance = {self.distance};')\n lines.append(f'parameters.unitCell = {list(self.lattice_settings)};')\n 
lines.append(f'parameters.ubMat = {str(self.UBmat.tolist())};')\n lines.append(f'parameters.oMat = {str(self.Omat.tolist())};')\n lines.append('parameters.oVec = [0,0,0];')\n lines.append(f'parameters.det0x = {self.xc};')\n lines.append(f'parameters.det0y = {self.yc};')\n lines.append('parameters.xTrans = [0,0,0];')\n lines.append(\n f'parameters.orientErrorDetPitch = {self.pitch * radians};')\n lines.append(f'parameters.orientErrorDetRoll = {self.roll * radians};')\n lines.append(f'parameters.orientErrorDetYaw = {self.yaw * radians};')\n lines.append(\n f'parameters.orientErrorGonPitch = {self.theta * radians};')\n lines.append('parameters.twoThetaCorrection = 0;')\n lines.append(f'parameters.twoThetaNom = 0;')\n lines.append(f'parameters.twoThetaStep = 0;')\n lines.append('parameters.omegaCorrection = 0;')\n lines.append(f'parameters.omegaNom = {self.omega * radians};')\n lines.append(f'parameters.omegaStep = 0;')\n lines.append('parameters.chiCorrection = 0;')\n lines.append(f'parameters.chiNom = {self.chi * radians};')\n lines.append(f'parameters.chiStep = 0;')\n lines.append('parameters.phiCorrection = 0;')\n lines.append(f'parameters.phiNom = {self.phi * radians};')\n lines.append(f'parameters.phiStep = {self.phi_step * radians};')\n lines.append(f'parameters.gridOrigin = {self.grid_origin};')\n lines.append(f'parameters.gridBasis = {self.grid_basis};')\n lines.append(f'parameters.gridDim = {self.grid_step};')\n lines.append('parameters.gridOffset = [0,0,0];')\n lines.append('parameters.extraFlip = false;')\n lines.append(f'outputData.dimensions = {list(self.grid_shape)};')\n lines.append('outputData.chunkSize = [50,50,50];')\n lines.append('outputData.compression = 0;')\n lines.append('transformer.transformOptions = 0;')\n lines.append('transformer.oversampleX = 1;')\n lines.append('transformer.oversampleY = 1;')\n lines.append('transformer.oversampleZ = 4;')\n with open(settings_file, 'w') as f:\n f.write('\\n'.join(lines))", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return 
(align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def load_phi(self, **kwargs):\r\n msf12 = kwargs['msf12']\r\n msf06 = kwargs['msf06']\r\n\r\n self.phi = PHI(msf12, msf06)\r\n self.zeta = 0.01 * np.ones([self.phi.num_modes])", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [5*np.sqrt(5)/self.lengthscale**3, 15./self.lengthscale**2,3*np.sqrt(5)/self.lengthscale, 1.]\r\n self.b = [9./8, 9*self.lengthscale**4/200., 3*self.lengthscale**2/5., 3*self.lengthscale**2/(5*8.), 3*self.lengthscale**2/(5*8.)]\r\n\r\n self.basis_alpha = np.ones((2*self.n_freq,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def _initialize_headers(self, n_atoms, title, parameters, set_environment, set_coordinates, set_time, set_cell,\n set_velocities, set_kineticEnergy, set_potentialEnergy, set_temperature,\n set_alchemicalLambda, set_protocolWork):\n self._n_atoms = n_atoms\n self._parameters = parameters\n self._handle.root._v_attrs.title = str(title)\n self._handle.root._v_attrs.conventions = str('Pande')\n self._handle.root._v_attrs.conventionVersion = str('1.1')\n self._handle.root._v_attrs.program = str('MDTraj')\n self._handle.root._v_attrs.programVersion = str(mdtraj.version.full_version)\n self._handle.root._v_attrs.method = str('BLUES')\n self._handle.root._v_attrs.methodVersion = str(blues.__version__)\n self._handle.root._v_attrs.reference = str('DOI: 10.1021/acs.jpcb.7b11820')\n\n if not hasattr(self._handle.root._v_attrs, 'application'):\n self._handle.root._v_attrs.application = str('OpenMM')\n self._handle.root._v_attrs.applicationVersion = str(simtk.openmm.version.full_version)\n\n # create arrays that store frame level informat\n if set_coordinates:\n self._create_earray(\n where='/', name='coordinates', atom=self.tables.Float32Atom(), shape=(0, self._n_atoms, 3))\n self._handle.root.coordinates.attrs['units'] = str('nanometers')\n\n if set_time:\n self._create_earray(where='/', name='time', atom=self.tables.Float32Atom(), shape=(0, ))\n self._handle.root.time.attrs['units'] = str('picoseconds')\n\n if set_cell:\n self._create_earray(where='/', name='cell_lengths', atom=self.tables.Float32Atom(), shape=(0, 3))\n self._create_earray(where='/', name='cell_angles', atom=self.tables.Float32Atom(), shape=(0, 3))\n self._handle.root.cell_lengths.attrs['units'] = str('nanometers')\n self._handle.root.cell_angles.attrs['units'] = str('degrees')\n\n if set_velocities:\n self._create_earray(\n where='/', name='velocities', atom=self.tables.Float32Atom(), shape=(0, self._n_atoms, 3))\n self._handle.root.velocities.attrs['units'] = str('nanometers/picosecond')\n\n if set_kineticEnergy:\n self._create_earray(where='/', name='kineticEnergy', atom=self.tables.Float32Atom(), shape=(0, ))\n self._handle.root.kineticEnergy.attrs['units'] = str('kilojoules_per_mole')\n\n if 
set_potentialEnergy:\n self._create_earray(where='/', name='potentialEnergy', atom=self.tables.Float32Atom(), shape=(0, ))\n self._handle.root.potentialEnergy.attrs['units'] = str('kilojoules_per_mole')\n\n if set_temperature:\n self._create_earray(where='/', name='temperature', atom=self.tables.Float32Atom(), shape=(0, ))\n self._handle.root.temperature.attrs['units'] = str('kelvin')\n\n #Add another portion akin to this if you want to store more data in the h5 file\n if set_alchemicalLambda:\n self._create_earray(where='/', name='alchemicalLambda', atom=self.tables.Float32Atom(), shape=(0, ))\n self._handle.root.alchemicalLambda.attrs['units'] = str('dimensionless')\n\n if set_protocolWork:\n self._create_earray(where='/', name='protocolWork', atom=self.tables.Float32Atom(), shape=(0, ))\n self._handle.root.protocolWork.attrs['units'] = str('kT')\n\n if parameters:\n if 'Logger' in self._parameters: self._parameters.pop('Logger')\n paramjson = json.dumps(self._parameters)\n self._encodeStringForPyTables(string=paramjson, name='parameters')\n\n if set_environment:\n try:\n envout = subprocess.check_output('conda env export --no-builds', shell=True, stderr=subprocess.STDOUT)\n envjson = json.dumps(yaml.load(envout), sort_keys=True, indent=2)\n self._encodeStringForPyTables(envjson, name='environment')\n except Exception as e:\n print(e)\n pass", "def __init__(self, name, a=115, b=111, c=19, mu=10**7, omegavec=[0, 0, 1],\r\n rho=0.5, szscale=2, n=0):\r\n assert(len(omegavec) == 3)\r\n assert(szscale >= 1)\r\n assert(n >= 0)\r\n\r\n # set the name\r\n self.name = name\r\n\r\n # set the rotation axis\r\n self.omegavec = omegavec\r\n\r\n # set the principal axes\r\n self.a = a\r\n self.b = b\r\n self.c = c\r\n\r\n # set the size scale\r\n self.szscale = szscale\r\n\r\n # convert the axes from meters to cm\r\n a *= 100\r\n b *= 100\r\n c *= 100\r\n\r\n # set the maximum allowed size\r\n self.sizecut = szscale*np.max([a, b, c])/2\r\n\r\n # set viscosity, create a Constant to avoid slowdowns\r\n self.mu = Constant(mu)\r\n\r\n # initialize the time, and the number of cycles\r\n self.t = 0\r\n self.ind = 0\r\n\r\n # set dt to 1 temporarily, for use in the solvers\r\n self.dt = Constant(1)\r\n\r\n # set density, create a Constant to avoid slowdowns\r\n self.rho = Constant(rho)\r\n\r\n # set the inital time, for logging\r\n self.start_time = time.time()\r\n\r\n # read in mesh, with n refinements\r\n with pkg_resources.path('SAMUS.meshes', '3ball%s.xml' % (n)) as p:\r\n mesh_path = p\r\n self.mesh = Mesh(str(mesh_path))\r\n\r\n # rescale the mesh to the input ellipsoids\r\n self.mesh.coordinates()[:, 0] *= a/2\r\n self.mesh.coordinates()[:, 1] *= b/2\r\n self.mesh.coordinates()[:, 2] *= c/2\r\n\r\n # use Elements to make a mixed function space\r\n V = VectorElement(\"CG\", self.mesh.ufl_cell(), 2)\r\n Q = FiniteElement(\"CG\", self.mesh.ufl_cell(), 1)\r\n self.Z = FunctionSpace(self.mesh, V*Q)\r\n\r\n # create actual function spaces which compose the mixed\r\n self.V = VectorFunctionSpace(self.mesh, \"CG\", 2)\r\n self.Q = FunctionSpace(self.mesh, \"CG\", 1)\r\n\r\n # create solution functions from the mixed space\r\n self.up = Function(self.Z) # solution function\r\n self.u_p_ = Function(self.Z) # function for previous solutions\r\n\r\n # get trial and test functions from the mixed space\r\n dup = TrialFunction(self.Z)\r\n v, q = TestFunctions(self.Z)\r\n\r\n # create the function of the rotation vector\r\n self.omega = interpolate(Constant(tuple(omegavec)), self.V)\r\n\r\n # split the solution 
functions\r\n self.u, self.p = split(self.up)\r\n u_, p_ = split(self.u_p_)\r\n\r\n # set solution functions to 0\r\n self.up.assign(Constant((0, 0, 0, 0)))\r\n self.u_p_.assign(Constant((0, 0, 0, 0)))\r\n\r\n # create the functions for storing the forces\r\n self.ftides = Function(self.V) # tides\r\n self.gravity = Function(self.V) # gravity\r\n self.centrifugal = Function(self.V) # centrifugal\r\n self.coriolis = Function(self.V) # coriolis\r\n self.forcing = Function(self.V) # total forces\r\n\r\n # name the functions for storage\r\n self.ftides.rename(\"Tidal Force\", \"Tidal Force\")\r\n self.gravity.rename(\"Self-Gravity\", \"Gravitational Force\")\r\n self.centrifugal.rename(\"Centrifugal\", \"Centrifugal Force\")\r\n self.coriolis.rename(\"Coriolis\", \"Coriolis Force\")\r\n self.forcing.rename(\"Forcing\", \"Total force on the object\")\r\n\r\n # create a constant to ensure solution stability\r\n A = Constant(1e4/max(mu, 1e4))\r\n\r\n # create the solution for the Navier-Stokes equations\r\n F = (\r\n # acceleration term\r\n A*self.rho*inner(((self.u-u_)/(self.dt)), v) * dx +\r\n\r\n # viscosity term\r\n A*self.mu*inner(grad(self.u), grad(v)) * dx +\r\n\r\n # advection term\r\n A*self.rho*inner(dot(self.u, nabla_grad(self.u)), v) * dx -\r\n\r\n # pressure term\r\n A*self.p*div(v) * dx +\r\n\r\n # mass continuity equation\r\n q*div(self.u) * dx -\r\n\r\n # force term\r\n A*inner(self.forcing, v) * dx)\r\n\r\n # find the derivative, for speed\r\n J = derivative(F, self.up, dup)\r\n\r\n # set up the Navier-Stokes solver\r\n problem = NonlinearVariationalProblem(F, self.up, J=J)\r\n self.solver = NonlinearVariationalSolver(problem)\r\n self.solver.parameters['newton_solver']['relaxation_parameter'] = 1.\r\n\r\n # split solution functions for access (weird FEniCS quirk)\r\n self.u, self.p = self.up.split()\r\n u_, p_ = self.u_p_.split()\r\n\r\n # name the solution functions\r\n self.u.rename(\"Velocity\", \"Velocity\")\r\n self.p.rename(\"Pressure\", \"Pressure\")\r\n\r\n # COMPUTE FUNCTIONS FOR GRAVITY SOLUTIONS\r\n self.G = Constant(6.674e-8) # sets gravitational constant, in cgs\r\n\r\n # get solution, trial, and test functions\r\n self.gravgs = Function(self.Z)\r\n dgs = TrialFunction(self.Z)\r\n gravh, gravc = TestFunctions(self.Z)\r\n gravg, gravs = split(self.gravgs)\r\n\r\n # set a scale to ensure the stability of the solution. this is undone\r\n # in the solution, but for unknown reasons O(10^-8) is too large for\r\n # the solver to maintain stability\r\n self.gravscale = 1e-3\r\n\r\n # compute the scaling constant for the Gaussian gravity form, which is\r\n # rescaled by self.gravscale. 
A Constant, for speed\r\n gravA = Constant(4*np.pi*float(self.G)*float(self.rho)*self.gravscale)\r\n\r\n # creates the equation set for Gaussian gravity\r\n gravF = (\r\n # this equation is 0=0, used to mix vector and scalar solutions\r\n gravs*div(gravh) * dx + inner(gravg, gravh) * dx +\r\n # this equation is the Gaussian form, div(g)=-4 pi G rho\r\n gravc*div(gravg) * dx + gravA*gravc * dx)\r\n\r\n # find the derivative, for speed\r\n gravJ = derivative(gravF, self.gravgs, dgs)\r\n\r\n # set up the gravitational solver\r\n gravproblem = NonlinearVariationalProblem(gravF, self.gravgs, J=gravJ)\r\n self.gravsolver = NonlinearVariationalSolver(gravproblem)\r\n self.gravsolver.parameters['newton_solver'\r\n ]['relaxation_parameter'] = 1.", "def set_TranslationsInTiltSeries(self, TiltSeries_):\n for (kk, Proj) in enumerate(TiltSeries_._ProjectionList):\n Proj._alignmentTransX = self._alignmentTransX[kk]\n Proj._alignmentTransY = self._alignmentTransY[kk]", "def HeaderParams(self, mstr, val):\n\n if len(val) > 0:\n if 'template' in mstr:\n self.templatename = val\n elif 'tenant' in mstr:\n self.tenantname = val\n elif 'policy' in mstr:\n self.policyname = val", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])", "def setTranslationsInTiltSeries(self, TiltSeries_):\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n proj.setAlignmentTransX(self._alignmentTransX[kk])\n proj.setAlignmentTransY(self._alignmentTransY[kk])", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def __prepare_dh_params(self):\n self.alpha = symbols('alpha0:' + str(self.joint_count))\n self.a = symbols('a0:' + str(self.joint_count))\n self.q = symbols('q1:' + str(self.joint_count + 1))\n self.d = symbols('d1:' + str(self.joint_count + 1))", "def __set_dh_params(self, joints):\n self.dh_params = {}\n\n for i in range(len(joints)):\n self.dh_params[self.alpha[i]] = joints[i].alpha\n\n self.dh_params[self.a[i]] = joints[i].a\n\n if joints[i].structure == 'revolute':\n self.dh_params[self.q[i]] = self.q[i]\n self.dh_params[self.d[i]] = joints[i].d\n\n elif joints[i].structure == 'prismatic':\n self.dh_params[self.q[i]] = joints[i].q\n self.dh_params[self.d[i]] = self.d[i]\n\n elif joints[i].structure == 'constant':\n self.dh_params[self.q[i]] = joints[i].q\n self.dh_params[self.d[i]] = joints[i].d\n\n self.__set_transform_matrices()", "def setpos(self, npvec3):\n homomat_bdb = self._bdb.get_homomat()\n homomat_bdb[:3, 3] = npvec3\n self._bdb.set_homomat(homomat_bdb)\n super().sethomomat(homomat_bdb)", "def __init__(self, params, print_df=True, print_help=False):\n stellar_type, position, parallax, proper_motion, v_radial = params\n self.init_params = 
params\n self.stellar_type = stellar_type\n self.proper_motion = proper_motion # [mas/year, mas/year]\n self.distance = 1/parallax # parsecs\n self.parallax = parallax # arcsecs\n self.position = position # [hms, dms]\n self.v_radial = v_radial # km/s\n\n self.galactic_coords = radec_to_galactic(self.position) # degrees\n\n # Proper motion, described in Cartesian components\n self.pm_dec = self.proper_motion[1]\n # We don't need to scale by cos(dec) because the units are already in mas/year\n self.pm_ra = self.proper_motion[0] #* np.cos(self.pm_dec)\n\n # Proper motion, described in angular components\n self.pm_mag = np.sqrt(self.pm_ra**2 + self.pm_dec**2) # mas/year\n # PA = angle east of north\n self.pm_posang = round(np.arctan(self.pm_ra/self.pm_dec), 4) # radians\n\n self.v_transverse = 4.74 * self.pm_mag * self.distance # km/s\n\n # Space velocity is the third leg of the v_trans/v_rad triangle.\n self.v_space = np.sqrt(self.v_transverse**2 + self.v_radial**2)\n\n star_obj = SkyCoord(Angle(position[0]), Angle(position[1]), frame='icrs')\n self.constellation = get_constellation(star_obj)\n\n self.d_from_GC = self.distance_to_galactic_center() # parsecs\n self.closer = True if self.d_from_GC > d_sun_GC else False\n\n d = [{'Name': 'Stellar Type', 'Value': self.stellar_type, 'units': 'N/A'},\n {'Name': 'Distance', 'Value': self.distance, 'units': 'parsec'},\n {'Name': 'Parallax', 'Value': self.parallax, 'units': 'arcsecs'},\n {'Name': 'Position', 'Value': self.position, 'units': '[hms, dms]'},\n {'Name': 'Galactic Coordinates', 'Value': self.galactic_coords,\n 'units': 'degrees'},\n {'Name': 'Proper Motion (RA)', 'Value': self.pm_ra, 'units': 'mas/year'},\n {'Name': 'Proper Motion (Dec)', 'Value': self.pm_dec, 'units': 'mas/year'},\n {'Name': 'Proper Motion Magnitude', 'Value': self.pm_mag, 'units': 'mas/year'},\n {'Name': 'Proper Motion Position Angle', 'Value': self.pm_posang,\n 'units': 'radians'},\n {'Name': 'Radial Velocity', 'Value': self.v_radial, 'units': 'km/s'},\n {'Name': 'Transverse Velocity', 'Value': self.v_transverse, 'units': 'km/s'},\n {'Name': 'Space Velocity', 'Value': self.v_space, 'units': 'km/s'},\n {'Name': 'Host Constellation', 'Value': self.constellation, 'units': 'N/A'},\n {'Name': 'Distance from Galactic Center', 'Value': self.d_from_GC,\n 'units': 'parsecs'},\n {'Name': 'Closer than Sun to GC?', 'Value': self.closer, 'units': 'N/A'}\n ]\n\n self.full_param_df = pd.DataFrame(d)\n\n if print_help:\n print getdoc(self), '\\n\\n'\n\n if print_df:\n print self.full_param_df", "def set_params_proj(ima, p, xform = \"xform.projection\"):\n\tfrom EMAN2 import Vec2f\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2]})\n\tt.set_trans(Vec2f(-p[3], -p[4]))\n\tima.set_attr(xform, t)", "def __init__(self, channels):\n super(PositionalEncodingPermute3D, self).__init__()\n self.penc = PositionalEncoding3D(channels)", "def __init__(self, theta=0, r=1,h=10,phi=0.2,Qa=(10,10),Qb=(10,10)):\n self.theta=theta\n self.r=r\n self.phi=phi\n self.h=h\n \n self.Qa0=Qa[0]\n self.Qa1=Qa[1]\n self.Qb0=Qb[0]\n self.Qb1=Qb[1]\n \n self.c1=self.Qa1-self.Qa0\n self.c2=self.Qb1-self.Qb0\n \n self.NewOrigin=(25,25)\n self.RotateAngle=-np.pi*3/4\n \n self.Te=0", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n 
b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def setThetaParams(self,param_list,value_list):\n for param,value in zip(param_list,value_list):\n _p=param.split()\n if (not param in self.theta_params):\n raise RuntimeError(param)\n if len(_p)==2:\n self.setMorphParam(_p,value,None)\n elif len(_p)==3:\n self.setChannelParam(_p,value,None)\n else:\n raise RuntimeError", "def give_orientation(pose, orr_array):\n pose.orientation.x = orr_array[0]\n pose.orientation.y = orr_array[1]\n pose.orientation.z = orr_array[2]\n pose.orientation.w = orr_array[3]", "def setheading(self, rot):\n if(self._gridmode):\n rot = round(rot/90)*90\n \n self._rotation = round(rot, 2)\n self._appendCurrentState()", "def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p", "def setRotationsInTiltSeries(self, TiltSeries_):\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n proj.setAlignmentRotation(self._alignmentRotations[kk])", "def updateHeaderComputedValues( self ):\n self.nAvgBytesPerSec = int( self.nNbrChannel*self.nSamplingRate*self.nNbrBitsPerSample/8 )\n self.nSizeBlockAlign = int( self.nNbrChannel*self.nNbrBitsPerSample/8 )\n self.dataType = Wav.getDataType( self.nNbrBitsPerSample )", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params['g_leak'])\n self.nav.set_g(params['g_nav'])\n self.kvhh.set_g(params['g_kvhh'])\n self.kva.set_g(params['g_kva'])\n self.kvsi.set_g(params['g_kvsi'])\n self.cav.set_g(params['g_cav'])\n self.kca.set_g(params['g_kca'])\n self.nap.set_g(params['g_nap'])\n self.kir.set_g(params['g_kir'])\n self.ampar.set_g(params['g_ampar'])\n self.nmdar.set_g(params['g_nmdar'])\n self.gabar.set_g(params['g_gabar'])\n self.tau_ca = params['t_ca']", "def _object_kinematics_params(self):\n obj_length, obj_width = self._obj_dims\n # Initial object position w.r.t its center\n obj_coords = np.matmul( # (2, 5) array of x-y coords of five points\n np.array([ # rotational matrix\n [np.cos(self._theta_init), np.sin(self._theta_init)],\n [-np.sin(self._theta_init), np.cos(self._theta_init)]\n ]),\n 0.5 * np.array([ # relative postion matrix\n [0, obj_length, obj_length, -obj_length, -obj_length],\n [0, obj_width, -obj_width, -obj_width, obj_width]\n ])\n )\n feat_vec_desired = obj_coords * self._fz_ratio\n\n # Global initial object position\n obj_coords += np.array([[self._x_obj_0], [self._y_obj_0]])\n speed = np.array([\n [(self._x_obj_f - self._x_obj_0) / self._t_sim],\n [(self._y_obj_f - self._y_obj_0) / self._t_sim]\n ])\n rot_speed = 
(self._theta_final - self._theta_init) / self._t_sim\n return obj_coords, speed, rot_speed, feat_vec_desired", "def setBorder3D():\n dislin.box3d()", "def setView3D(x,y,z, viewtype='absolute'):\n vdict = {'absolute':'ABS','user':'USER','angle':'ANGLE'}\n dislin.view3d(x,y,z,vdict[viewtype])", "def setheaders(f):\n f.headers['OBSERVER'] = \"'%s'\" % camera.status.observer\n f.headers['FILTERID'] = \"'%s'\" % filtname(camera.status.filter)\n f.headers['FILTER'] = \"%1d\" % camera.status.filter\n f.headers['XYSTAGE'] = \"'%d,%d'\" % camera.status.guider\n f.headers['MIRROR'] = \"'%s'\" % camera.status.mirror\n if camera.status.imgtype == 'BIAS':\n f.headers['BIAS'] = camera.status.object\n elif camera.status.imgtype == 'DARK':\n f.headers['DARK'] = camera.status.object\n else:\n f.headers['OBJECT'] = camera.status.object\n try:\n skytemp = weather.status.skytemp\n f.headers['SKYTEMP'] = \"%4.1f\" % skytemp\n f.comments['SKYTEMP'] = \"'Infrared sky temp in degC'\"\n except:\n pass\n\n try:\n if not camera.status.TJ.current.posviolate: #Position calibrated to epoch\n ra = camera.status.TJ.current.Ra/15/3600\n dec = camera.status.TJ.current.Dec/3600\n epoch = camera.status.TJ.current.Epoch\n alt = camera.status.TJ.current.Alt\n GotTJ = True\n elif camera.status.TJ.current.RaC:\n ra = camera.status.TJ.current.RaC\n dec = camera.status.TJ.current.DecC\n alt = camera.status.TJ.current.Alt\n t = time.gmtime()\n epoch = t.tm_year + (t.tm_yday/366.0)\n GotTJ = True\n else:\n GotTJ = False\n except AttributeError:\n GotTJ = False \n if GotTJ:\n f.headers['RA_OBJ'] = \"%12.9f\" % (ra*15.0)\n f.headers['RA'] = \"'%s'\" % sexstring(ra)\n f.headers['DEC_OBJ'] = \"%13.9f\" % dec\n f.headers['DEC'] = \"'%s'\" % sexstring(dec)\n f.headers['EQUINOX'] = \"%6.1f\" % epoch\n f.headers['SECZ'] = \"%6.3f\" % (1/math.cos((90-alt)*math.pi/180))\n if GotFT:\n hjd,message = fitstime.findtime(fimage=f, verbose=0, allfields=0)\n if type(hjd) == float:\n f.headers['HJD'] = \"%f\" % hjd\n f.comments['HJD'] = \"Heliocentric Julian Day at exposure midpoint\"", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def __init__(self, encut, spinaxis, ldaul, Uparam, Jparam, name='DFTCL_settings'):\n ncl_settings = {\"ISPIN\": 2, \"MAGMOM\": None, \"SAXIS\": spinaxis, \"LSORBIT\": \".TRUE.\", \"LNONCOLLINEAR\": \".TRUE.\"}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=ncl_settings, 
hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def set_symmetry(self):\n if self.symmetry == 'cubic':\n self.c = self.b = self.a\n self.alpha = self.beta = self.gamma = 90.0\n elif self.symmetry == 'tetragonal':\n self.b = self.a\n self.alpha = self.beta = self.gamma = 90.0\n elif self.symmetry == 'orthorhombic':\n self.alpha = self.beta = self.gamma = 90.0\n elif self.symmetry == 'hexagonal':\n self.b = self.a\n self.alpha = self.beta = 90.0\n self.gamma = 120.0\n elif self.symmetry == 'monoclinic':\n self.alpha = self.gamma = 90.0", "def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)", "def set_body_frame_position_vectors(pa):\n nb = pa.nb[0]\n # loop over all the bodies\n for i in range(nb):\n fltr = np.where(pa.body_id == i)[0]\n cm_i = pa.cm[3 * i:3 * i + 3]\n R_i = pa.R[9 * i:9 * i + 9]\n for j in fltr:\n dx = pa.x[j] - cm_i[0]\n dy = pa.y[j] - cm_i[1]\n dz = pa.z[j] - cm_i[2]\n\n pa.dx0[j] = (R_i[0] * dx + R_i[3] * dy + R_i[6] * dz)\n pa.dy0[j] = (R_i[1] * dx + R_i[4] * dy + R_i[7] * dz)\n pa.dz0[j] = (R_i[2] * dx + R_i[5] * dy + R_i[8] * dz)", "def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return", "def write_initparams(params, outdir, padding_var=7, paramsfn='parameters', skiplat=False, skipglat=False):\n paramfile = outdir + paramsfn + '.txt'\n with open(paramfile, 'w') as myfile:\n myfile.write('# Parameters\\n')\n\n dio.ensure_dir(outdir)\n for key in params:\n if key == 'reg1' or key == 'reg2' or key == 'reg3':\n np.savetxt(outdir + key + '.txt', params[key], fmt='%d', delimiter=',', header=key + ' particle IDs')\n if key == 'xyv0':\n np.savetxt(outdir + 'xyv0.txt', params['xyv0'], delimiter=',',\n header='xy0 (initial positions) v0 (initial 
velocities)')\n elif key == 'xy':\n if not skiplat:\n np.savetxt(outdir + 'xy.txt', params['xy'], delimiter=',',\n header='xy0 (undeformed lattice positions from mesh)')\n elif key == 'KL':\n if not skiplat:\n np.savetxt(outdir + 'KL.txt', params['KL'], fmt='%i', delimiter=',',\n header='KL (Bond Connectivity List)')\n elif key == 'NL':\n if not skiplat:\n np.savetxt(outdir + 'NL.txt', params['NL'], fmt='%i', delimiter=',', header='NL (Neighbor List)')\n elif key == 'BND':\n np.savetxt(outdir + 'BND.txt', params['BND'], fmt='%i', header='BND (Boundary List)')\n elif key == 'OmK':\n if not skipglat:\n np.savetxt(outdir + 'OmK.txt', params['OmK'], fmt='%f', delimiter=',',\n header='OmK (spring frequency array, for Nash limit: (-1)^(c+b)kl^2/Iw')\n elif key == 'OmG':\n if not skipglat:\n np.savetxt(outdir + 'Omg.txt', params['OmG'], fmt='%f', delimiter=',',\n header='Omg (gravitational frequency array, for Nash limit: (-1)^(c+1)mgl/Iw')\n elif key == 'LVUC':\n if not skiplat:\n np.savetxt(outdir + 'LVUC.txt', params['LVUC'], fmt='%i', delimiter=',',\n header='Lattice Vector and Unit cell vector coordinates')\n else:\n with open(paramfile, 'a') as myfile:\n # print 'Writing param ', str(key)\n # print ' with value ', str(params[key])\n # print ' This param is of type ', type(params[key])\n\n if isinstance(params[key], str):\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + params[key] + '\\n')\n elif isinstance(params[key], np.ndarray):\n # print params[key].dtype\n if key == 'BIND':\n print 'BIND = ', str(params[key]).replace('\\n', '')\n\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + \", \".join(np.array_str(params[key]).split()).replace('[,', '[') + '\\n')\n # if params[key].dtype == 'float64':\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ np.array_str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n # elif params[key].dtype == 'int32':\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n # else:\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n elif isinstance(params[key], list):\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + str(params[key]) + '\\n')\n else:\n # print key, ' = ', params[key]\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + '{0:.12e}'.format(params[key]) + '\\n')\n\n # elif key == 'LV':\n # np.savetxt(outdir+'LV.txt',params['LV'], fmt='%18e',delimiter=',', header='Lattice Vector coordinates')\n # elif key == 'UC':\n # np.savetxt(outdir+'UC.txt',params['UC'], fmt='%18e',delimiter=',', header='Unit cell vector coordinates')\n #\n # elif key == 'h':\n # with open(outdir+'h.txt', \"w\") as hfile:\n # hfile.write(\"# h (time step) \\n{0:5e}\".format(h) )\n # elif key == 'beta':\n # with open(outdir+'beta.txt', \"w\") as betafile:\n # betafile.write(\"# beta (damping coeff) \\n{0:5e}\".format(beta) )", "def getOptimizableVariables(self, TiltAlignmentParameters_):\n ntilt = self._ntilt\n nmark = len(self._Markers)\n\n nopti = (nmark - 1) * 3\n\n # translation\n if self.optimizeMarkerPositions:\n nopti += (ntilt) * 2\n\n # variable magnifications for projections\n if TiltAlignmentParameters_.dmag:\n nopti = nopti + ntilt - 1\n\n #check that irefmark and ireftilt are set properly\n if not (TiltAlignmentParameters_.irefmark in range(nmark)):\n TiltAlignmentParameters_.irefmark 
= 0\n print(\"Warning: irefmark must be 1<= irefmark <=nmark\")\n print(\"New irefmark: \" + str(TiltAlignmentParameters_.irefmark))\n\n if not (TiltAlignmentParameters_.ireftilt in self._projIndices.astype(int)):\n TiltAlignmentParameters_.ireftilt = abs(self._tiltAngles).argmin()\n print(\"Warning: ireftilt must be in range of projection indices\")\n print(\"New ireftilt: \" + str(TiltAlignmentParameters_.ireftilt))\n\n #variable rotation for projections\n if TiltAlignmentParameters_.drot:\n nopti = nopti + ntilt\n else:\n nopti = nopti + 1\n\n # beam tilt\n if TiltAlignmentParameters_.dbeam:\n nopti = nopti + 1\n\n ## gradient on image rotation and magnification in projections\n #if TiltAlignmentParameters_.dGradRotMag:\n # nopti = nopti + 2\n\n\n # nopti += ntilt\n\n optimizableVariables = numpy.zeros((nopti), dtype='float')\n\n # marker 3D coords\n\n ivar = 0\n for (imark, Marker) in enumerate(self._Markers):\n # reference marker irefmark is fixed to standard value\n if ((imark ) != TiltAlignmentParameters_.irefmark):\n r = Marker.get_r()\n optimizableVariables[ivar] = r[0]\n optimizableVariables[ivar + 1] = r[1]\n optimizableVariables[ivar + 2] = r[2]\n ivar = ivar + 3\n\n # translations\n if self.optimizeMarkerPositions:\n for itilt in range(0, ntilt):\n # translation in reference projection is zero\n #if self._projIndices[itilt] != TiltAlignmentParameters_.ireftilt:\n optimizableVariables[ivar] = self._alignmentTransX[itilt]\n optimizableVariables[ivar + 1] = self._alignmentTransY[itilt]\n ivar = ivar + 2\n\n # magnification changes\n if TiltAlignmentParameters_.dmag:\n for itilt in range(0, ntilt):\n # magnification of reference projection is 1.\n if int(self._projIndices[itilt]) != TiltAlignmentParameters_.ireftilt:\n optimizableVariables[ivar] = self._alignmentMagnifications[itilt]\n ivar = ivar + 1\n\n # image rotations\n if TiltAlignmentParameters_.drot:\n for itilt in range(0, ntilt):\n optimizableVariables[ivar] = self._alignmentRotations[itilt]\n ivar = ivar + 1\n\n # all rotations are the same - take the first one\n else:\n optimizableVariables[ivar] = self._alignmentRotations[0]\n ivar = ivar + 1\n\n # beam inclination\n if TiltAlignmentParameters_.dbeam:\n optimizableVariables[ivar] = self._alignmentBeamTilt\n ivar = ivar + 1\n\n # focus gradient (TODO)\n #if TiltAlignmentParameters_.dGradRotMag:\n # optimizableVariables[ivar] = self._alignmentMagnFoc\n # optimizableVariables[ivar+1] = self._alignmentRotFoc\n\n # for i in range(ntilt):\n # optimizableVariables[ivar] = -1\n # ivar += 1\n\n return optimizableVariables", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params[\"g_leak\"])\n self.kvhh.set_g(params[\"g_kvhh\"])\n self.cav.set_g(params[\"g_cav\"])\n self.kca.set_g(params[\"g_kca\"])\n self.nap.set_g(params[\"g_nap\"])\n self.tau_ca = params[\"t_ca\"]", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def set_params(self):\n max_margin = int(self.alpha) + 1\n self.sample_params['add'] = [0, max_margin, max_margin]", "def setup_orbit(self, t, halo_gas_density, galaxy_velocity):\n \n if any( 
[halo_gas_density > 1.0E-10] ) : # convert to mass density\n halo_gas_density = halo_gas_density * self.ic['mu_halo'] * cgs.mp\n \n # if t is an array, then use a cubic spline to make a function from the orbital\n # data. If t is a single value, then halo gas dnesity and velocity are constants..\n # make them into functions anyway to make rest of everything work...\n if np.size(halo_gas_density) > 1 : \n self.halo_density = interpolate.UnivariateSpline(t, halo_gas_density,k=3)\n else:\n self.halo_density = lambda x: halo_gas_density\n \n if np.size(galaxy_velocity) > 1:\n self.galaxy_velocity = interpolate.UnivariateSpline(t, galaxy_velocity ,k=3)\n else:\n self.galaxy_velocity = lambda x: galaxy_velocity", "def reorderParameters(self, p, *order):\n self._.a = tuple(p[i, i, 1] for i in range(self._.d + 1))\n self._.b = tuple(p[i, i+1, 1] if i < self._.d else Integer(0)\n for i in range(self._.d + 1))\n self._.c = tuple(p[i, i-1, 1] if i > 0 else Integer(0)\n for i in range(self._.d + 1))\n if self._has(\"omega\"):\n self._.omega = Matrix(SR, [[r[i] for i in order]\n for r in self._.omega])\n if self._has(\"theta\"):\n del self._.theta\n if self._has(\"fsd\"):\n del self._.fsd", "def inverse_transform3(phi, theta=0.0, psi=0.0, tx=0.0, ty=0.0, tz=0.0, mirror = 0, scale=1.0):\n\n\td = Transform({'type': 'spider', 'phi': phi, 'theta': theta, 'psi': psi, 'tx': tx, 'ty': ty, 'tz': tz, \"mirror\":mirror,\"scale\":scale})\n\td = d.inverse()\n\td = d.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def setParameters(self) -> None:\n # get a list of the header and data files in the folder\n self.headerF = glob.glob(os.path.join(self.dataPath, \"*.XTR\"))\n if len(self.headerF) == 0:\n self.headerF = glob.glob(os.path.join(self.dataPath, \"*.XTRX\"))\n self.dataF = glob.glob(os.path.join(self.dataPath, \"*.RAW\"))\n # data byte information might be different for each file\n # so it is a dictionary\n self.dataByteOffset: Dict = {}\n self.recChannels = {}\n self.dataByteSize = 4\n # data type\n self.dtype = np.float32\n # get the number of data files and header files - this should be equal\n self.numHeaderFiles: int = len(self.headerF)\n self.numDataFiles: int = len(self.dataF)", "def setup_orientation_annotation(self) :\n \n # Anatomical directions in LPS convention, numpy order\n directions_anatomical = {\n \"L\" : (0,0,+1),\n \"R\" : (0,0,-1),\n \"P\" : (0,+1,0),\n \"A\" : (0,-1,0),\n \"I\" : (-1,0,0),\n \"S\" : (+1,0,0),\n }\n \n # Index directions, numpy order\n directions_index = {\n \"+x\" : (0,0,+1),\n \"-x\" : (0,0,-1),\n \"+y\" : (0,+1,0),\n \"-y\" : (0,-1,0),\n \"+z\" : (-1,0,0),\n \"-z\" : (+1,0,0),\n }\n \n directions = (directions_anatomical \n if self.display_coordinates in [\"physical\", \"nearest_axis_aligned\"]\n else directions_index)\n \n # Window locations\n locations = {\n \"up\" : (1,0),\n \"down\" : (-1,0),\n \"left\" : (0,-1),\n \"right\" : (0,1)\n }\n \n for location, p in locations.items() :\n matrix = self._3d_world_to_slice\n direction = numpy.dot(self._3d_slice_to_world, numpy.hstack((0, p)))\n \n # Find closest in-slice direction based on dot product\n closest = None\n max_distance = -1\n for name, d in directions.items() :\n distance = numpy.dot(d, direction)\n if distance > max_distance :\n max_distance = distance\n closest = name\n \n # Set text\n index = 
self._orientation_annotation_index[location]\n self._orientation_annotation.SetText(index, closest)", "def __init__(self):\n self.rot_axis = 1", "def __init__(self,):\r\n self.g = 9.81\r\n self.l = 0.5\r\n self.m1 = 1.0\r\n self.m2 = 1.0\r\n self.m3 = 1.0\r\n self.r1 = 1.0\r\n self.r2 = 1.0\r\n self.tau = 0.001\r\n self.theta1 = 1.0\r\n self.theta2 = 1.0\r\n self.theta3 = 1.0", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def d3transf_df3(self,f):\r\n raise NotImplementedError", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def InitialAlignment(self, scale = 0.15):\n\n\n pts3D = self.pts3D\n\n # Compute eigenvecs and rotate according to them\n pc, evals, mean = utils.pca(pts3D, remove_mean = True)\n pts3D_rot = np.dot(pc.T, pts3D)\n\n # Find length according to max eigenvector\n mins = np.min(pts3D_rot, axis=1)\n maxs = np.max(pts3D_rot, axis=1)\n max_length = maxs[0] - mins[0]\n \n # Rotation matrix is the covariance matrix, but we want Z as the leading\n # eigenvector:\n rot = np.c_[-pc[2], pc[1], pc[0]]\n\n # Transform model to have zero mean, reasonable scale and rotation.\n self.transform(rot, np.dot(rot, -mean), float(scale) / max_length)", "def assign_model_parameters(self,xmax,zmax,dh,duration):\n self.model_parameters['xmax']=xmax\n self.model_parameters['zmax']=zmax\n self.model_parameters['dh']=dh\n self.model_parameters['duration']=duration", "def change_heading(self, heading_change):\n\n # Apply change to processed data\n direction, mag = cart2pol(self.u_processed_mps, self.v_processed_mps)\n self.u_processed_mps, self.v_processed_mps = pol2cart(direction - np.deg2rad(heading_change), mag)\n\n # Apply change to unprocessed data\n direction, mag = cart2pol(self.u_mps, self.v_mps)\n self.u_mps, self.v_mps = pol2cart(direction - np.deg2rad(heading_change), mag)", "def translation(self, x, y, z) -> None:\n ...", "def setup_axes3(fig, rect):\n\n # rotate a bit for better orientation\n tr_rotate = Affine2D().translate(-95, 0)\n\n # scale degree to radians\n tr_scale = Affine2D().scale(np.pi/180., 1.)\n\n tr = tr_rotate + tr_scale + PolarAxes.PolarTransform()\n\n grid_locator1 = angle_helper.LocatorHMS(4)\n tick_formatter1 = angle_helper.FormatterHMS()\n\n grid_locator2 = MaxNLocator(3)\n\n ra0, ra1 = 8.*15, 14.*15\n cz0, cz1 = 0, 14000\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(ra0, ra1, cz0, cz1),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # adjust axis\n ax1.axis[\"left\"].set_axis_direction(\"bottom\")\n ax1.axis[\"right\"].set_axis_direction(\"top\")\n\n ax1.axis[\"bottom\"].set_visible(False)\n ax1.axis[\"top\"].set_axis_direction(\"bottom\")\n ax1.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax1.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax1.axis[\"top\"].label.set_axis_direction(\"top\")\n\n ax1.axis[\"left\"].label.set_text(r\"cz [km$^{-1}$]\")\n ax1.axis[\"top\"].label.set_text(r\"$\\alpha_{1950}$\")\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.9 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax", "def set_calibration(self, px: float):\n self.meta_data['SizeX'] = px\n self.meta_data['SizeY'] = px\n self.meta_data['SizeZ'] = px", "def _set_params(self, x):\r\n assert x.size == self.num_params\r\n self.varianceU = x[0]\r\n self.varianceY = x[1]\r\n self.lengthscaleU = x[2]\r\n self.lengthscaleY = x[3]" ]
[ "0.59650105", "0.593235", "0.5929709", "0.58860916", "0.5739548", "0.5691223", "0.56045157", "0.5573526", "0.55701107", "0.5569729", "0.55564326", "0.55041516", "0.5470599", "0.5299311", "0.5288693", "0.5281004", "0.5277822", "0.52361757", "0.5231874", "0.5215359", "0.5197185", "0.5192886", "0.518786", "0.5173227", "0.5172853", "0.5144875", "0.5120197", "0.5104728", "0.51026654", "0.5094818", "0.509449", "0.50911427", "0.5090148", "0.50893706", "0.5063742", "0.50606185", "0.50591034", "0.5057929", "0.5055598", "0.5054295", "0.5050891", "0.5043482", "0.5032025", "0.50276583", "0.502616", "0.5018725", "0.5011608", "0.49999115", "0.4999839", "0.49923047", "0.49895656", "0.49775147", "0.49772808", "0.49769914", "0.49745315", "0.49628434", "0.4948783", "0.49460968", "0.49404848", "0.49374312", "0.49367034", "0.4932432", "0.4932338", "0.49309233", "0.4914994", "0.49085113", "0.49047056", "0.48994523", "0.4897088", "0.48952097", "0.48913237", "0.4887008", "0.4883523", "0.4866464", "0.48632857", "0.48590717", "0.48483336", "0.4840308", "0.48384002", "0.4837534", "0.4831703", "0.48262772", "0.48227572", "0.48185477", "0.48068112", "0.47976395", "0.47932413", "0.47912186", "0.47838444", "0.47806728", "0.47798878", "0.47798344", "0.47790408", "0.47751245", "0.47734854", "0.47724882", "0.47612238", "0.4759865", "0.4759831", "0.47572604" ]
0.65861595
0
retrieve projection alignment parameters from the header phi theta psi s2x s2y
def get_params_proj(ima, xform = "xform.projection"):
	t = ima.get_attr(xform)
	d = t.get_params("spider")
	return d["phi"],d["theta"],d["psi"],-d["tx"],-d["ty"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def get_acquisition_pars(theta=None, phi=None, shift=None, nx=None, ny=None, cfg=None):\n # ss_rect_map = {(13, 13): 1E7, (13, 14): 1E7, (13, 15): 1E7, (13, 16): 1E7, (13, 17): 1E7,\n # (14, 13): 1E7, (14, 14): 1E5, (14, 15): 1E5, (14, 16): 1E5, (14, 17): 1E7,\n # (15, 13): 1E7, (15, 14): 1E5, (15, 15): 5E4, (15, 16): 1E5, (15, 17): 1E7,\n # (16, 13): 1E7, (16, 14): 1E5, (16, 15): 1E5, (16, 16): 1E5, (16, 17): 1E7,\n # (17, 13): 1E7, (17, 14): 1E7, (17, 15): 1E7, (17, 16): 1E7, (17, 17): 1E7}\n nmeans_dict = {(15, 15): 1,\n(16, 15): 1,\n(16, 16): 1,\n(15, 16): 1,\n(14, 16): 1,\n(14, 15): 1,\n(14, 14): 1,\n(15, 14): 1,\n(16, 14): 1,\n(17, 14): 2,\n(17, 15): 1,\n(17, 16): 1,\n(17, 17): 2,\n(16, 17): 1,\n(15, 17): 1,\n(14, 17): 1,\n(13, 17): 1,\n(13, 16): 2,\n(13, 15): 1,\n(13, 14): 1,\n(13, 13): 5,\n(14, 13): 2,\n(15, 13): 1,\n(16, 13): 5,\n(17, 13): 5,\n(18, 13): 5,\n(18, 14): 5,\n(18, 15): 5,\n(18, 16): 5,\n(18, 17): 5,\n(18, 18): 5,\n(17, 18): 5,\n(16, 18): 2,\n(15, 18): 1,\n(14, 18): 1,\n(13, 18): 5,\n(12, 18): 5,\n(12, 17): 5,\n(12, 16): 5,\n(12, 15): 5,\n(12, 14): 5,\n(12, 13): 5,\n(12, 12): 5,\n(13, 12): 5,\n(14, 12): 5,\n(15, 12): 5,\n(16, 12): 5,\n(17, 12): 5,\n(18, 12): 5,\n(19, 12): 5,\n(19, 13): 5,\n(19, 14): 5,\n(19, 15): 5,\n(19, 16): 5,\n(19, 17): 5,\n(19, 18): 5,\n(19, 19): 5,\n(18, 19): 5,\n(17, 19): 5,\n(16, 19): 5,\n(15, 19): 5,\n(14, 19): 5,\n(13, 19): 5,\n(12, 19): 5,\n(11, 19): 5,\n(11, 18): 5,\n(11, 17): 5,\n(11, 16): 5,\n(11, 15): 5,\n(11, 14): 5,\n(11, 13): 5,\n(11, 12): 5,\n(11, 11): 5,\n(12, 11): 5,\n(13, 11): 5,\n(14, 11): 5,\n(15, 11): 5,\n(16, 11): 5,\n(17, 11): 5,\n(18, 11): 5,\n(19, 11): 5}\n\n\n\n # led_center = 15\n # led_disp = (int(cfg.array_size)+1)//2\n # led_range = range(led_center-led_disp, led_center+led_disp)\n # ledmap = product(led_range, led_range)\n #\n # ss_dict = {}\n # for led in ledmap:\n # # if led == [15, 15]:\n # # ss_dict[(led[0], led[1])] = 60E4\n # # else:\n # dist = (np.abs(led[0]-15)**2+np.abs(led[1]-15))\n # ss = 5.E5*(1+.5*dist)\n # ss_dict[(led[0], led[1])] = ss\n # if ss >3E6:\n # ss_dict[(led[0], led[1])] = 3E6\n\n power = 255\n # Camera parameters\n if nx is not None:\n # if nx == 14 or nx == 15 or nx ==16 or ny == 15 or ny ==16 or ny == 14:\n # shutter_speed = 50000\n # else:\n # shutter_speed = 500000\n # nmeans = nmeans_dict[nx, ny]\n # if [nx, ny] in [[15, 15], [15, 16], [14, 17], [14,16], [14, 15],\n # [14, 14], [13,16], [13, 15]]:\n # shutter_speed = 100000\n # nmeans = 1\n # else:\n # shutter_speed = 600000\n # nmeans = 1\n\n try:\n # shutter_speed = ss_dict[nx, ny]\n shutter_speed = 50000\n nmeans = nmeans_dict[nx, ny]\n except:\n shutter_speed = 1E5\n nmeans = 1\n return float(cfg.iso), shutter_speed, power, nmeans\n\n shutter_speed_min = cfg.shutter_speed[0]\n shutter_speed_max = cfg.shutter_speed[0]\n if phi == None:\n if shift == None:\n raise Exception(\"Must assign a value either for phi or shift.\")\n shutter_speed = translate(phi, 0, cfg.shift_max,\n shutter_speed_min, shutter_speed_max)\n else:\n shutter_speed = translate(phi, 0, 90,\n shutter_speed_min, shutter_speed_max)\n # Led parameters\n led_power = cfg.max_led_power\n return cfg.iso, shutter_speed, led_power, nmeans", "def stereographic_projection(phi_degree, 
psi_degree):\n psi_rad = psi_degree *np.pi/180\n psi_stereo = 2*np.tan(psi_rad/2)\n\n phi_rad = phi_degree *np.pi/180\n return phi_rad, psi_stereo", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def get_preamble_z(self):\n a = PhysicalLayer.get_preamble()\n return 2,np.array([z for z in a['symb'][0:31] for _ in range(self._sps)])", "def parameters(self):\n # encoded in θ\n return self.theta.columns", "def get_projection_params(fname):\n # Read strings from the region file\n with open(fname, 'r') as f:\n lines = f.readlines()\n wcs = lines[2][:-1] # Coordinate system TODO bad variable name\n if lines[0] != '# Region file format: DS9 version 4.1\\n':\n print 'Warning: potentially invalid region file!'\n print 'First line was: ' + lines[0]\n if wcs != 'fk5':\n raise Exception('Regions must be in sky (fk5) coordinates; got ' +\n wcs + 'instead')\n\n # Manipulate each string and save to list\n lines = filter(lambda x: '# projection' in x, lines[3:])\n projspecs = []\n for ln in lines:\n lnsplit = ln[2:-1].split(')') # Remove leading '#', trailing '\\n'\n if lnsplit[1] == '':\n r = lnsplit[0] + ')' # No optional arguments\n else:\n r = lnsplit[0] + ') #' + lnsplit[1] # Add octothorpe\n projspecs.append('%s; %s' % (wcs, r))\n\n return projspecs", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def find_start_pose(self):\n\n # Find start position\n y,x = [k for k,v in self.mp.items() if v == 94 or v == 60 \\\n or v == 62 or v == 118][0]\n\n\n # Assign orientation\n dy,dx, theta = 0,0, 0\n if self.mp[y,x] == ord('^'): theta = np.pi/2\n elif mp[y,x] == ord('<'): theta = -np.pi\n elif mp[y,x] == ord('>'): theta = 0\n else: theta = -np.pi/2\n\n return y, x, theta", "def get_phase_space(self, grid_flag):\n\n f = h5py.File(self.xs_path, 'r')\n self.N = f['paramdescrip']['NVALUE'].value # det maximum range Ni for each d_i\n phase_space = {}\n order = {}\n NPAR = f['paramdescrip']['NPAR'].value[0]\n for di in range(NPAR - 1):\n di_name = f['paramdescrip']['PARNAM'].value[di] # get names for dimensions. Starts at 0\n # get values for dimensions. Starts at 1. e.g. 'BURNUP': array([ 0., 9.35253143, 18.70503998,..\n # Is saved as a np.array, of floats64 FORTRAN-contiguous\n phase_space[di_name] = np.array([float(val) for val in f['paramvaleurs'][\n 'pval %d' % (di + 1)].value], order='F')\n order[di] = di_name # e.g. 
'7': 'BURNUP'\n\n iso_aux = []\n # just concatenate those two\n for iso in f['contenu']['NOMISO'].value[:]:\n iso_aux.append(iso)\n for iso in f['contenu']['NOMMAC'].value[:]:\n iso_aux.append(iso)\n f.close()\n self.iso_A2 = iso_aux\n\n # USER IMPOSED: Non-independant variables set to [0].\n \"\"\"\n *Do not eliminate them, this will bring problems with the cartesin product later one\n *if instead of '[phase_space['PHASE'][0]]' (which is equal to 1) just '[1]' is written then np.where() does not recognize the value.\n\n This two problems rise from the decision of defining the 'space of interest' as a subset from the 'phase space' which in time is read directly from the H5F file. Later several comparisons are made between the two. The upside is the need for no explicit declaration of the phase-space thus minimizing chances of un-noticed error in domain assignation.\n \"\"\"\n if 'PHASE' in phase_space.keys():\n phase_space['PHASE'] = [phase_space['PHASE'][0]]\n if 'BURNUPstep' in phase_space.keys():\n phase_space['BURNUPstep'] = [phase_space['BURNUPstep'][0]]\n\n if grid_flag == 'SG': # major update required\n \"\"\"\n In contras to FG, the stored values in the concatenated SAPHYB file only considers different burnup steps, i.e a set of values [0, 500, 500, 100] are stored as [0, 500, 100]. Two posibilities remain, read the BURNUP value from the single XS files separatly or load a pickeled object with the phase space. The second option was implemented.\n \"\"\"\n with open(self.file_path + self.xs_folder + 'phase_space.pickle', 'rb') as handle:\n phase_space_pk = pickle.load(handle)\n phase_space_pk.pop('a')\n phase_space_pk.pop('d')\n phase_space_pk.pop('l')\n phase_space_pk.pop('BURNUP_evol')\n phase_space_pk.pop('BURNUP_steps')\n phase_space = phase_space_pk\n\n self.phase_space, self.order, self.d, self.NPAR = phase_space, order, len(order), NPAR", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_probeLocs_calib_setup(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*1e-3*25.4, -4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4]\n y_pos = [-4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4, -4.25*1e-3*25.4]\n z_pos = [-2.25*1e-3*25.4, -0.75*1e-3*25.4, 0.75*1e-3*25.4, 2.25*1e-3*25.4]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def get_probeLocs_calib_setup_cm(dir, 
num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*2.54, -4.25*2.54, 4.24*2.54, 4.24*2.54]\n y_pos = [-4.25*2.54, 4.24*2.54, 4.24*2.54, -4.25*2.54]\n z_pos = [-2.25*2.54, -0.75*2.54, 0.75*2.54, 2.25*2.54]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def _sims_header(self, hdr):\n # Called DefAnalysisBis and DefEps in OpenMIMS\n d = {}\n d['simsheader version'], d['original filename'], d['matrix'], \\\n d['sigref auto'], d['sigref points'], d['sigref delta'], \\\n d['sigref scan time'], d['sigref measure time'], \\\n d['sigref beam on time'], d['eps centering enabled'], \\\n d['eps enabled'], d['eps central energy'], d['eps b field'] = \\\n unpack(self._bo + 'i 256s 256s 10i', hdr.read(556))\n\n d['EPSCentralSpecies'] = self._species(hdr)\n d['EPSReferenceSpecies'] = self._species(hdr)\n\n # Don't know how long method name is, runs into null-padded zone.\n d['eps ref mass tube hv'], d['eps ref mass tube hv max var'], \\\n d['sample rotation'], d['sample rotation speed'], \\\n d['sample rotation synced'], d['sample name'], \\\n d['user name'], d['method name'] = \\\n unpack(self._bo + '2d 3i 80s 32s 256s', hdr.read(396))\n\n d['original filename'] = self._cleanup_string(d['original filename'])\n d['matrix'] = self._cleanup_string(d['matrix'])\n d['sample name'] = self._cleanup_string(d['sample name'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['method name'] = self._cleanup_string(d['method name'])\n\n d['sigref auto'] = bool(d['sigref auto'])\n d['eps centering enabled'] = bool(d['eps centering enabled'])\n d['eps enabled'] = bool(d['eps enabled'])\n d['sample rotation'] = bool(d['sample rotation'])\n d['sample rotation synced'] = bool(d['sample rotation synced'])\n d['sigref scan time'] /= 10 # 0.1 sec increments\n return d", "def calculateSipWcsHeader(wcs, order, bbox, spacing, header=None):\n transform = getPixelToIntermediateWorldCoords(wcs)\n crpix = wcs.getPixelOrigin()\n cdMatrix = wcs.getCdMatrix()\n crval = wcs.getSkyOrigin()\n gridNum = Extent2I(int(bbox.getWidth()/spacing + 0.5), int(bbox.getHeight()/spacing + 0.5))\n\n sip = SipApproximation(transform, crpix, cdMatrix, Box2D(bbox), gridNum, order)\n\n md = makeTanSipMetadata(sip.getPixelOrigin(), crval, sip.getCdMatrix(), sip.getA(), sip.getB(),\n sip.getAP(), sip.getBP())\n\n if header is not None:\n header.combine(md)\n else:\n header = md\n\n return header", "def orbitproject(x,y,inc,phi=0,psi=0):\n\n x2 = x*np.cos(phi) + y*np.sin(phi)\n y2 = -x*np.sin(phi) + y*np.cos(phi)\n z2 = y2*np.sin(inc)\n y2 = y2*np.cos(inc)\n\n xf = x2*np.cos(psi) - y2*np.sin(psi)\n yf = x2*np.sin(psi) + y2*np.cos(psi)\n\n 
return (xf,yf,z2)", "def _nanosims_header(self, hdr):\n # Called MaskNano in OpenMIMS; BFieldTab separated out; create extra sub-dict PeakCenter\n d = {}\n d['PeakCenter'] = {}\n d['nanosimsheader version'], d['regulation mode'], d['mode'], \\\n d['grain mode'], d['semigraphic mode'], d['stage delta x'], \\\n d['stage delta y'], d['working frame width'], \\\n d['working frame height'], d['scanning frame x'], \\\n d['scanning frame width'], d['scanning frame y'], \\\n d['scanning frame height'], d['counting frame x start'], \\\n d['counting frame x end'], d['counting frame y start'], \\\n d['counting frame y end'], d['detector type'], d['electron scan'], \\\n d['scanning mode'], d['beam blanking'], \\\n d['PeakCenter']['peakcenter enabled'], d['PeakCenter']['start'], \\\n d['PeakCenter']['frequency'], d['b fields'] = \\\n unpack(self._bo + '25i', hdr.read(100))\n\n d['PeakCenter']['peakcenter enabled'] = bool(d['PeakCenter']['peakcenter enabled'])\n d['regulation mode'] = bool(d['regulation mode'])\n d['grain mode'] = bool(d['grain mode'])\n d['semigraphic mode'] = bool(d['semigraphic mode'])\n d['scanning mode'] = bool(d['scanning mode'])\n\n # Set a few extra variables.\n d['counting frame width'] = d['counting frame x end'] - d['counting frame x start'] + 1\n d['counting frame height'] = d['counting frame y end'] - d['counting frame y start'] + 1\n\n # Found in at least one version (file v11, nsHeader v8) a repeat of\n # Poly_list and this first part of nanoSIMSHeader. Total of repeat\n # adds up to 288. After last Poly_list, 288 byte padding zone, not all\n # null-bytes.\n hdr.seek(288, 1)\n\n # Is this the nPrintRed from OpenMIMS?\n d['print results'] = bool(unpack(self._bo + 'i', hdr.read(4))[0])\n\n d['SibCenterHor'] = self._sib_center(hdr)\n d['SibCenterVert'] = self._sib_center(hdr)\n\n # Duplicate and store these two in sub dicts\n b_field_index, has_sib_center = \\\n unpack(self._bo + '2i', hdr.read(8))\n if b_field_index < 0:\n b_field_index = None\n has_sib_center = bool(has_sib_center)\n\n d['SibCenterHor']['b field index'] = b_field_index\n d['SibCenterVert']['b field index'] = b_field_index\n d['SibCenterHor']['sib center enabled'] = has_sib_center\n d['SibCenterVert']['sib center enabled'] = has_sib_center\n\n d['EnergyCenter'] = self._energy_center(hdr)\n d['E0SCenter'] = self._e0s_center(hdr)\n\n d['EnergyCenter']['wait time'], d['presputtering raster'], \\\n d['PeakCenter']['E0P offset'], d['E0SCenter']['steps'], \\\n d['baseline measurement'], d['baseline offset'], \\\n d['baseline frequency'] = \\\n unpack(self._bo + '5i d i', hdr.read(32))\n return d", "def sph2car(r, theta, phi):\n x = r * np.sin(theta) * np.cos(phi)\n y = r * np.sin(theta) * np.sin(phi)\n z = r * np.cos(theta)\n\n return x, y, z", "def read_projection(fname, element, theta_index):\n\n projections = dxchange.read_hdf5(fname, \"MAPS/XRF_roi\")\n theta = float(dxchange.read_hdf5(fname, \"MAPS/extra_pvs_as_csv\")[theta_index].split(b',')[1])\n elements = read_channel_names(fname)\n\n try:\n if find_index(elements, element) != None:\n return projections[find_index(elements, element),:, :], theta\n else:\n raise TypeError\n except TypeError:\n print(\"**** ERROR: Element %s does exist in the file: %s \" % (element, fname))\n return None", "def sat_2d_pos(theta):\n r_sat = a * (1 - e**2) / (1 + e * np.cos(theta))\n return r_sat, theta", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 
'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def info(self):\n tline = \"\"\n for (ii, projection) in enumerate(self._ProjectionList):\n tiltAngle = projection._tiltAngle\n transX = -projection._alignmentTransX\n transY = -projection._alignmentTransY\n rot = -(projection._alignmentRotation + 90.)\n mag = projection._alignmentMagnification\n tline = tline + (\"%3d: \" % ii)\n tline = tline + (\"%15s; \" % projection._filename)\n tline = tline + (\"tiltAngle=%9.3f; \" % tiltAngle)\n tline = tline + (\"transX=%9.3f; \" % transX)\n tline = tline + (\"transY=%9.3f; \" % transY)\n tline = tline + (\"rot=%9.3f; \" % rot)\n tline = tline + (\"mag=%9.3f\\n\" % mag)\n print(tline)", "def parse_annotations(Hinv, obsmat_txt):\n\n def to_image_frame(loc):\n \"\"\"\n Given H^-1 and (x, y, z) in world coordinates,\n returns (u, v, 1) in image frame coordinates.\n \"\"\"\n loc = np.dot(Hinv, loc) # to camera frame\n return loc / loc[2] # to pixels (from millimeters)\n\n mat = np.loadtxt(obsmat_txt)\n num_peds = int(np.max(mat[:, 1])) + 1\n peds = [np.array([]).reshape(0, 4) for _ in range(num_peds)] # maps ped ID -> (t,x,y,z) path\n\n num_frames = (mat[-1, 0] + 1).astype(\"int\")\n num_unique_frames = np.unique(mat[:, 0]).size\n recorded_frames = [-1] * num_unique_frames # maps timestep -> (first) frame\n peds_in_frame = [[] for _ in range(num_unique_frames)] # maps timestep -> ped IDs\n\n frame = 0\n time = -1\n blqk = False\n for row in mat:\n if row[0] != frame:\n frame = int(row[0])\n time += 1\n recorded_frames[time] = frame\n\n ped = int(row[1])\n\n peds_in_frame[time].append(ped)\n loc = np.array([row[2], row[4], 1])\n loc = to_image_frame(loc)\n loc = [time, loc[0], loc[1], loc[2]]\n peds[ped] = np.vstack((peds[ped], loc))\n\n return recorded_frames, peds_in_frame, peds", "def extract_calibration(self):\n #TODO add function to check if the folder exists because opencv points to other error rather than saying it doesnt exist\n cv_file = cv2.FileStorage(\"calib_images/calibration.yaml\", cv2.FILE_STORAGE_READ)\n camera_matrix = cv_file.getNode(\"camera_matrix\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n print(\"[INFO]: Extracted camera parameters.\")\n cv_file.release()\n return camera_matrix, dist_matrix", "def eclipse_parameters(sat, earth, sun, time):\n\n position = earth + sat\n barycentric_e = position.at(time).observe(earth)\n barycentric_s = position.at(time).observe(sun)\n _, _, distance_to_earth = barycentric_e.radec()\n _, _, 
distance_to_sun = barycentric_s.radec()\n theta_e = semidiameter(earthlib.earth_radius_au, distance_to_earth.au)\n theta_s = semidiameter(0.00465, distance_to_sun.au) # Sun's average radius in AU = 0.00465\n theta = barycentric_e.separation_from(barycentric_s).radians\n return theta, theta_e, theta_s", "def getProjections(self): \n x, y, z = self.XYZCoordinate\n origin = self.SkeletonPoints[0]\n self.coorOrigin = origin\n self.XYProjections = [GeometryToolBox.projected_point(p, origin, x, y) for p in self.SkeletonPoints]\n self.XZProjections = [GeometryToolBox.projected_point(p, origin, x, z) for p in self.SkeletonPoints]", "def process(path):\n # get parameter value:\n with open('config.cym', 'r') as f:\n line = f.readline()\n #print(line)\n pam = float(line[1:])\n f.close()\n # get position of aster:\n with open('aster.txt', 'r') as f:\n for line in f:\n if len(line)>3 and not line[0]=='%':\n #print(line)\n val = line.split()\n x = float(val[2])\n y = float(val[3])\n #z = float(val[4])\n #pos = math.sqrt(x*x+y*y+z*z)\n pos = math.sqrt(x*x+y*y)\n\n f.close()\n return (pam, pos)", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def find_cea_coord(header,phi_c,lambda_c,nx,ny,dx,dy):\n nx = int(nx)\n ny = int(ny)\n\n # Array of CEA 
coords\n x = []\n y = []\n\n for j in range(ny):\n col = []\n row = []\n for i in range(nx):\n col.append(np.radians((i-(nx-1)/2)*dx))\n row.append(np.radians((j-(ny-1)/2)*dy))\n x.append(col)\n y.append(row)\n\n x = np.array(x)\n y = np.array(y)\n\n # Relevant header values\n rSun = header['rsun_obs']/header['cdelt1'] #solar radius in pixels\n disk_latc = np.radians(header['CRLT_OBS'])\n disk_lonc = np.radians(header['CRLN_OBS'])\n disk_xc = header['CRPIX1'] - 1 #disk center wrt lower left of patch\n disk_yc = header['CRPIX2'] - 1\n pa = np.radians(header['CROTA2']*-1)\n\n latc = np.radians(lambda_c)\n lonc = np.radians(phi_c) - disk_lonc\n\n # Convert coordinates\n lat = []\n lon = []\n xi = []\n eta = []\n\n for j in range(ny):\n lat_col = []\n lon_col = []\n xi_col = []\n eta_col = []\n for i in range(nx):\n lat0,lon0 = plane2sphere(x[j,i],y[j,i],latc,lonc)\n lat_col.append(lat0)\n lon_col.append(lon0)\n\n xi0,eta0 = sphere2img(lat0,lon0,disk_latc,0.0,disk_xc,disk_yc,rSun,pa)\n xi_col.append(xi0)\n eta_col.append(eta0)\n lat.append(lat_col)\n lon.append(lon_col)\n xi.append(xi_col)\n eta.append(eta_col)\n\n lat = np.array(lat)\n lon = np.array(lon)\n xi = np.array(xi)\n eta = np.array(eta)\n\n return xi,eta,lat,lon", "def GetProjectionParams( File ):\n dataset = gdal.Open( File )\n SubDatasets = dataset.GetSubDatasets()\n\n SubDataset = gdal.Open( SubDatasets[0][0] )\n Projection = SubDataset.GetProjection()\n GeoTransform = SubDataset.GetGeoTransform()\n\n return Projection, GeoTransform", "def getTranslationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in separate array - easier for optimization\n nprojs = len(TiltSeries_._ProjectionList._list)\n self._alignmentTransX = nprojs * [0.]\n self._alignmentTransY = nprojs * [0.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentTransX[kk] = proj.getAlignmentTransX()\n self._alignmentTransY[kk] = proj.getAlignmentTransY()\n return self._alignmentTransX, self._alignmentTransY", "def params(self):\n return [cq.Symbol('theta_0')]", "def _target_xy(header, outwcs):\n tgt_x, tgt_y = None, None\n tgt_ra = header.get('TGTRA', None)\n tgt_dec = header.get('TGTDEC', None)\n if tgt_ra is not None and tgt_dec is not None \\\n and not np.allclose([tgt_ra, tgt_dec], 0):\n # convert from hours to degrees\n tgt_ra *= 15.0\n if outwcs.wcs.naxis == 2:\n tgt_x, tgt_y = \\\n outwcs.wcs_world2pix(tgt_ra, tgt_dec, 0)\n else:\n tgt_w, tgt_y, tgt_x = \\\n outwcs.wcs_world2pix(0, tgt_dec, tgt_ra, 0)\n return tgt_x, tgt_y", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n 
wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def euler2Q(self, (phi, theta, psi)):\n\thalf_phi = 0.5*phi;\n\thalf_theta = 0.5*theta;\n\thalf_psi = 0.5*psi;\n\n return np.asarray([\n (cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),\n (sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),\n (cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),\n (cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))\n ]);", "def ROCKSTAR_binary():\n header_size = 256 #Bytes, size of the header\n halo_struct_size = 264 #Bytes, properties stored for one halo using dtype structure dt (260 from struct 'halo' in halo.h from ROCKSTAR and \n #4 bytes probably from max_metric from struct 'extra_halo_info' in halo.h)\n bytes_to_header_info = 64 #bytes until the header info starts\n \n dt_header_info = [ \n ('n_halos' , np.int64), #total number of halos in this file\n ('tot_n_particles' , np.int64), #total number of particles in this file \n ('box_size' , np.float32), #side lenght in Mpc/h of simulation box\n ('m_particles' , np.float32), #mass of one particle in h-1Msun\n ('type_particles' , np.int64) #type of particle (either 1=halo, star, gas etc.) 
\n ]\n \n dt = [\n ('haloid' , np.int64), #int64_t id\n ('x_pos' , np.float32), #float pos[6], 1\n ('y_pos' , np.float32), #float pos[6], 2\n ('z_pos' , np.float32), #float pos[6], 3\n ('pos4' , np.float32), #float pos[6], 4\n ('pos5' , np.float32), #float pos[6], 5\n ('pos6' , np.float32), #float pos[6], 6 \n ('x_corevel' , np.float32), #float corevel[3], 1\n ('y_corevel' , np.float32), #float corevel[3], 2\n ('z_corevel' , np.float32), #float corevel[3], 3 \n ('x_vel_bulk' , np.float32), #float bulkvel[3], 1\n ('y_vel_bulk' , np.float32), #float bulkvel[3], 2\n ('z_vel_bulk' , np.float32), #float bulkvel[3], 3\n ('mhalo' , np.float32), #float m \n ('rvir' , np.float32), #float r \n ('rvir_child' , np.float32), #float child_r\n ('vmax_r' , np.float32), #float vmax_r\n ('mhalo_bound' , np.float32), #float mgrav\n ('vmax' , np.float32), #float vmax\n ('vpeak' , np.float32), #float rvmax\n ('rscale' , np.float32), #float rs\n ('rscale_Klypin' , np.float32), #float klypin_rs\n ('vrms' , np.float32), #float vrms\n ('x_ang' , np.float32), #float J[3], 1\n ('y_ang' , np.float32), #float J[3], 2\n ('z_ang' , np.float32), #float J[3], 3\n ('energy' , np.float32), #float energy \n ('spinParameter' , np.float32), #float spin\n ('mhalo_200b' , np.float32), #float alt_m[4], 1 \n ('mhalo_200c' , np.float32), #float alt_m[4], 2 \n ('mhalo_500c' , np.float32), #float alt_m[4], 3 \n ('mhalo_2500c' , np.float32), #float alt_m[4], 4 \n ('x_off' , np.float32), #float Xoff\n ('v_off' , np.float32), #float Voff\n ('b_to_a' , np.float32), #float b_to_a \n ('c_to_a' , np.float32), #float c_to_a\n ('x_a' , np.float32), #float A[3], 1\n ('y_a' , np.float32), #float A[3], 2\n ('z_a' , np.float32), #float A[3], 3 \n ('b_to_a_500c' , np.float32), #float b_to_a2\n ('c_to_a_500c' , np.float32), #float c_to_a2\n ('x_a_500c' , np.float32), #float A2[3], 1 \n ('y_a_500c' , np.float32), #float A2[3], 2\n ('z_a_500c' , np.float32), #float A2[3], 3 \n ('spin_Bullock' , np.float32), #float bullock_spin\n ('T_U' , np.float32), #float kin_to_pot\n ('Mpseudo_Behroozi', np.float32), #float m_pe_b \n ('Mpseudo_Diemer' , np.float32), #float m_pe_d\n ('rhalf_mass' , np.float32), #float halfmass_radius\n ('n_particles' , np.int64), #int64_t num_p\n ('n_particles_child', np.int64), #int64_t num_child_particles \n ('p_start' , np.int64), #int64_t p_start\n ('descIndex' , np.int64), #int64_t desc\n ('flags' , np.int64), #int64_t flags\n ('n_core' , np.int64), #int64_t n_core\n ('PosUncertainty' , np.float32), #float min_pos_err\n ('VelUncertainty' , np.float32), #float min_vel_err\n ('BulkVelUnc' , np.float32), #float min_bulkvel_err\n ('mmetric' , np.float32) #unclear where it comes from, it might be mmetric \n ]\n \n return header_size, halo_struct_size, dt, dt_header_info, bytes_to_header_info", "def get_calib_from_header(header):\n\n prefix = 'HIERARCH GAMSE WLCALIB '\n\n xorder = header[prefix+'XORDER']\n yorder = header[prefix+'YORDER']\n\n coeff = np.zeros((yorder+1, xorder+1))\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n coeff[j,i] = header[prefix+'COEFF {:d} {:d}'.format(j, i)]\n\n calib = {\n 'coeff': coeff,\n 'npixel': header[prefix+'NPIXEL'],\n 'k': header[prefix+'K'],\n 'offset': header[prefix+'OFFSET'],\n 'std': header[prefix+'STDDEV'],\n 'nuse': header[prefix+'NUSE'],\n 'ntot': header[prefix+'NTOT'],\n# 'identlist': calibwindow.identlist,\n 'window_size': header[prefix+'WINDOW_SIZE'],\n 'xorder': xorder,\n 'yorder': yorder,\n 'maxiter': header[prefix+'MAXITER'],\n 'clipping': 
header[prefix+'CLIPPING'],\n 'q_threshold': header[prefix+'Q_THRESHOLD'],\n 'direction': header[prefix+'DIRECTION'],\n }\n return calib", "def get_y(theta, phi, w, r=1):\n return np.sin(phi) * r * np.cos(w) + np.cos(phi) * np.cos(theta) * r * np.sin(w)", "def get_sip_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n a_order = int(header.get('A_ORDER', 0))\n b_order = int(header.get('B_ORDER', 0))\n ac = np.matrix(np.zeros((a_order+1, a_order+1), dtype=np.float64))\n bc = np.matrix(np.zeros((b_order+1, b_order+1), dtype=np.float64))\n for m in range(a_order+1):\n for n in range(0, a_order+1-m):\n ac[m, n] = header.get('A_%d_%d' % (m, n), 0.0)\n for m in range(b_order+1):\n for n in range(0, b_order+1-m):\n bc[m, n] = header.get('B_%d_%d' % (m, n), 0.0)\n return cd, ac, bc", "def get_pose(self, obs: np.array) -> Tuple[bool, Optional[Tuple[np.array, float]]]:\n detection, centers = cv2.findCirclesGrid(obs,\n patternSize=(self.width, self.height),\n flags=cv2.CALIB_CB_SYMMETRIC_GRID,\n blobDetector=self.detector)\n if detection:\n image_points = centers[:, 0, :]\n _, rotation_vector, translation_vector = cv2.solvePnP(objectPoints=self.circle_pattern,\n imagePoints=image_points,\n cameraMatrix=self.camera_matrix,\n distCoeffs=self.distortion_coefs)\n else:\n return detection, None\n\n theta = rotation_vector[1][0]\n\n x_global = translation_vector[2][0]\n y_global = translation_vector[0][0]\n z_global = translation_vector[1][0]\n\n x_but_global = x_global - self.target_distance\n y_but_global = y_global\n z_but_global = z_global\n\n x_but_robot = x_but_global * np.cos(theta)\n y_but_robot = y_but_global * np.cos(theta)\n z_but_robot = z_but_global\n\n return detection, (np.array([y_but_robot, z_but_robot, x_but_robot]), -np.rad2deg(theta))", "def get_params(self):\n\n params={'f_star':self.get_f_star(), 'g_star':self.get_g_star(), \n 'Delta2_star':self.get_Delta2_star(), \n 'n_star':self.get_n_star(), 'alpha_star':self.get_alpha_star()}\n\n return params", "def _sib_center(self, hdr):\n # Called SecIonBeamNano in OpenMIMS\n d = {}\n d['detector'], d['start'], d['step size'], d['center'], \\\n d['50% width'], d['count time'] = \\\n unpack(self._bo + '3i 4x 2d i 4x', hdr.read(40))\n\n if d['detector'] < 0:\n d['detector'] = None\n d['count time'] /= 100 # 10 ms increments to seconds\n return d", "def find_start_parameter_wo_theta(image):\n if image.ndim < 2 or image.size == 0 :\n print(\"Error: Wrong image dimension: {}; or image size is zero\".format(image.ndim))\n print(\"Abort finding start parameters\")\n return None\n\n try:\n # image size: x = cols, y = rows\n sz_x, sz_y = image.shape[1], image.shape[0]\n\n # Maximum as amplitude\n amplitude, offset = np.max(img), np.min(img)\n\n # Location of 2D maximum as xo, yo\n amp_xo, amp_yo = np.array(np.unravel_index(np.argmax(image), image.shape))[::-1]\n\n # Standard deviation from maximum location\n sigma_x, sigma_y = map(lambda x: np.round(x * (1 - 0.69)), [amp_xo, amp_yo])\n\n # Gather start fit parameter for returning\n res = [amplitude, amp_xo, amp_yo, sigma_x, sigma_y, offset]\n\n except:\n print(\"Warning: Finding suitable start parameters failed!\")\n res = None\n\n return res", "def read_header(self):\n # Read entire header into memory in one read to minimize Disk I/O.\n self.fh.seek(0)\n hdr = self.fh.read(self.header['header size'])\n\n # Find several markers in the byte-string\n # Each of these may occur more than once, 
find last.\n polylist_pos = hdr.rfind(b'Poly_list\\x00')\n champslist_pos = hdr.rfind(b'Champs_list\\x00')\n offsetlist_pos = hdr.rfind(b'Offset_list\\x00')\n\n # Find first occurance for these.\n # analparam_pos = hdr.find(b'Anal_param\\x00')\n analparamnano_pos = hdr.find(b'Anal_param_nano\\x00')\n analparamnanobis_pos = hdr.find(b'Anal_param_nano_bis\\x00')\n\n # Turn byte-string into BytesIO file-like object; reading and\n # keeping track of where we are is easier that way than trying to\n # slice byte-string as an array and keeping track of indices.\n hdr = io.BytesIO(hdr)\n\n # Main header\n hdr.seek(12)\n self.header.update(self._main_header(hdr))\n\n # NanoSIMS header, starts with PolyList/ChampsList/OffsetList\n # The following configurations have been found in the wild, so far:\n # 1. NS header\n # 2. PL, NS header\n # 3. PL, CL, OL, NS header\n # 4. PL, CL, OL, partial NS header, PL, NS header, PL, CL, OL,\n # partial NS header, PL, NS header\n # Note: I have not seen any *lists with contents (only length 0).\n # From OpenMIMS documentation I know that PolyList is as list of\n # Species dicts, but don't know how to read ChampsList or OffsetList.\n if polylist_pos < 0:\n # Case 1: No PL marker, so far only found for Real Time Images,\n # beam stability, or secondary ion beam centering files.\n if (self.header['analysis type'].endswith('rti') or\n self.header['file type'] == 35):\n hdr.seek(216, 1)\n elif self.header['file type'] == 31:\n if (self.header['analysis type'].endswith('hmr') or\n self.header['analysis type'].endswith('trolley step scan')):\n hdr.seek(120, 1)\n else:\n # secondary ion beam\n hdr.seek(600, 1)\n else:\n raise NotImplementedError('No PolyList marker found in header '\n 'and not and RTI image. Don\\'t know '\n 'how to continue.')\n elif (champslist_pos < 0 and offsetlist_pos < 0):\n # Case 2: PL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n elif (polylist_pos < champslist_pos < offsetlist_pos):\n # Case 3: PL, CL, OL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n elif (champslist_pos < offsetlist_pos < polylist_pos):\n # Case 4: PL, CL, OL, partial NS header, PL, NS header\n # with possible repeat\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n else:\n raise NotImplementedError(\n 'An unknown order of the Poly/Champs/Offset Lists occured.\\n'\n 'Positions: PL = {}, CL = {}, OL = {}'\n ''.format(polylist_pos, champslist_pos, offsetlist_pos))\n\n self.header['NanoSIMSHeader'] = self._nanosims_header(hdr)\n\n # How much to skip? 
Chomping does not work; what if first value is 0?\n # This is correct so far, for nsheader v8 and 9\n hdr.seek(948, 1)\n self.header['BFields'] = []\n for b in range(self.header['NanoSIMSHeader']['b fields']):\n bf = self._bfield(hdr)\n bf['counting frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['counting frame height'] * \\\n self.header['NanoSIMSHeader']['counting frame width']\n bf['scanning frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['scanning frame height'] * \\\n self.header['NanoSIMSHeader']['scanning frame width']\n bf['working frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['working frame height'] * \\\n self.header['NanoSIMSHeader']['working frame width']\n self.header['BFields'].append(bf)\n # End nanosims_header/bfield based on Poly_list position\n\n # Analytical parameters\n\n # anal_param is not in OpenMIMS at all, represents file\n # Cameca NanoSIMS Data/raw_spec/cur_anal_par\n # However, only few useful things in this section, all of\n # which are also in other sections. Skip.\n # if analparam_pos < 0:\n # msg = 'Anal_param not found in header, skipping.'\n # warnings.warn(msg)\n # else:\n # hdr.seek(analparam_pos + 24)\n # print(analparam_pos)\n # d = {}\n # d['primary ion'], d['primary current begin'], \\\n # d['primary current end'], d['raster'], \\\n # d['X 00 always 1.0'], \\\n # d['X 01 always 1'], d['X 02 always 0'], \\\n # d['X 03 always 1'], d['X 04 always 0'], \\\n # d['X 05 always 0'], d['X 06 (not0 always 0'], \\\n # d['X 07 (not) always 0'], d['X 08 always 0'], \\\n # d['pressure 1'], d['e0w'], d['X 09 always 35 or #'], \\\n # d['X 10 junk'], \\\n # d['X 11 always 1'], d['X 12 always 0'], \\\n # d['X 13 always 1'], d['X 14 always 0'], \\\n # d['X 15 always 0'], d['X 16 always 0'], \\\n # d['X 17 always 0'], d['X 18 always 0'], \\\n # d['X 19 always 0'], d['X 20 always 300'], \\\n # d['X 21'], d['X 22'], d['X 23'], d['X 24'], \\\n # d['pressure 2'], d['X 25 junk'] = \\\n # unpack(self._bo + '24s 4d 8i 48s d i 28s 14i 8s 176s', hdr.read(416))\n #\n # d['pressure 1'] = self._cleanup_string(d['pressure 1'])\n # d['pressure 2'] = self._cleanup_string(d['pressure 2'])\n # d['primary ion'] = self._cleanup_string(d['primary ion'])\n #\n # self.header['AnalParam'] = d\n\n # Called AnalyticalParamNano AND AnalysisParamNano in OpenMIMS.\n # Here, split out Primary and Secondary beam.\n # Represents the file Cameca NanoSIMS Data/raw_spec/cur_anal_par_nano\n if analparamnano_pos < 0:\n msg = 'Anal_param_nano not found in header, '\n msg += 'don\\'t know where PrimaryBeam section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnano_pos + 16)\n self.header['analysis version'], self.header['n50large'], \\\n self.header['comment'] = \\\n unpack(self._bo + '2i 8x 256s', hdr.read(272))\n\n self.header['n50large'] = bool(self.header['n50large'])\n self.header['comment'] = self._cleanup_string(self.header['comment'])\n\n self.header['PrimaryBeam'] = self._primary_beam(hdr)\n self.header['SecondaryBeam'] = self._secondary_beam(hdr)\n self.header['Detectors'] = self._detectors1(hdr)\n\n self.header['SecondaryBeam']['E0S'] = self.header['Detectors'].pop('E0S')\n self.header['SecondaryBeam']['pressure multicollection chamber'] = \\\n self.header['Detectors'].pop('pressure multicollection chamber')\n\n # Add overall mode of machine, based on E0W\n if self.header['SecondaryBeam']['E0W'] < 0:\n self.header['polarity'] = '+'\n else:\n self.header['polarity'] = '-'\n\n # Combine pixel size from 
NanoSIMSHeader and raster from PrimaryBeam\n # Prevent ZeroDivisionError if undefined\n wfw = self.header['NanoSIMSHeader']['working frame width']\n if not wfw:\n wfw = 1\n self.header['NanoSIMSHeader']['working frame raster'] = \\\n self.header['PrimaryBeam']['raster']\n self.header['NanoSIMSHeader']['scanning frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['scanning frame width'] / wfw\n self.header['NanoSIMSHeader']['counting frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['counting frame width'] / wfw\n\n # Header for non-nano SIMS\n magic = unpack(self._bo + 'i', hdr.read(4))[0]\n if magic != 2306:\n msg = 'SIMSHeader magic number not found here at byte {}.'\n msg = msg.format(hdr.tell()-4)\n raise ValueError(msg)\n self.header['SIMSHeader'] = self._sims_header(hdr)\n\n if self.header['analysis version'] >= 5:\n if analparamnanobis_pos < 0:\n msg = 'Anal_param_nano_bis not found in header, '\n msg += 'don\\'t know where second Detectors section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnanobis_pos + 24)\n self.header['Detectors'].update(self._detectors2(hdr))\n xl = self.header['Detectors'].pop('exit slit xl')\n for n in range(7):\n det = self.header['Detectors']['Detector {}'.format(n+1)]\n w = list(det['exit slit widths'])\n w[2] = xl[5*n:5*(n+1)]\n det['exit slit widths'] = tuple(w)\n h = list(det['exit slit heights'])\n h[2] = xl[5*(n+1):5*(n+2)]\n det['exit slit heights'] = tuple(h)\n\n # Presets\n self.header['Presets'] = self._presets(hdr)\n\n # End Detectors pt 2 based on anal_param_nano_bis position\n\n # Last part of detectors\n if self.header['analysis version'] >= 6:\n d3 = self._detectors3(hdr)\n self.header['Detectors']['TIC'] = d3.pop('TIC')\n for k, v in d3.items():\n self.header['Detectors'][k].update(v)\n # End PrimaryBeam/SecondaryBeam/Presets/Detectors based on anal_param_nano position\n\n # Image header, at end of overall header\n if self.header['file type'] == 26:\n hdr.seek(-176, 2)\n self.header['Isotopes'] = self._isotopes_hdr(hdr)\n elif self.header['file type'] in (21, 22, 31, 35):\n # no image header for line scan or beam stability\n pass\n else:\n hdr.seek(-84, 2)\n self.header['Image'] = self._image_hdr(hdr)\n\n # Done reading header. 
Check for and read external files for extra info.\n if os.path.exists(os.path.splitext(self.filename)[0] + '.chk_is'):\n self._read_chk_is()", "def _secondary_beam(self, hdr):\n # Called ApSecondaryNano in OpenMIMS\n d = {}\n tmp = unpack(self._bo + 'd 42i 2d', hdr.read(192))\n d['E0W'], d['ES'] = tmp[:2]\n d['ES widths'] = tmp[2:12]\n d['ES heights'] = tuple(tmp[12:22])\n d['AS'] = tmp[22]\n d['AS widths'] = tuple(tmp[23:33])\n d['AS heights'] = tuple(tmp[33:43])\n d['EnS'], d['EnS width'] = tmp[43:]\n return d", "def read_parameters():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n param1 = hdulist1[1].data['e1'][:sample]\n param2 = hdulist1[1].data['e2'][:sample]\n weights = hdulist1[1].data['weight'][:sample]\n return param1, param2, weights", "def Phi(l,m,theta,phi):\n Psilm_th, Psilm_ph=Psi(l,m,theta,phi);\n Philm_th=-Psilm_ph;\n Philm_ph=+Psilm_th;\n return Philm_th, Philm_ph", "def get_PSMC_results(filename, name):\n text = \"\"\n with open(filename, \"r\") as f:\n text = f.read()\n results_block = text.split(\"//\\n\")[-2]\n time = []\n IICR_2 = []\n for line in results_block.split('\\n'):\n if line[0:2] == \"RS\":\n values = line.split(\"\\t\")\n time.append(float(values[2]))\n IICR_2.append(float(values[3]))\n # Get the theta and rho values\n theta_rho_line = text.split(\"PA\\t\")[-1]\n theta_rho_line.split(\"\\n\")[0]\n (theta, rho) = theta_rho_line.split(\" \")[1:3]\n theta = float(theta)\n rho = float(rho)\n\n return {'name': name, 'model':'psmc', 'x_vector' : time, 'y_vector': IICR_2, 'theta': theta, 'rho': rho}", "def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]", "def spherical_project(x, y, cos_lat, sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n right_angle = np.pi / 2\n\n d_lon = x - celestial_pole_x\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n phi = native_pole_x + d_lon + np.pi\n theta = y\n else:\n phi = native_pole_x - d_lon\n theta = -y\n else:\n cos_d_lon = np.cos(d_lon)\n\n phi = native_pole_x + np.arctan2(\n -cos_lat * np.sin(d_lon),\n (sin_lat * celestial_cos_lat)\n - (cos_lat * celestial_sin_lat * cos_d_lon))\n\n theta = asin(\n (sin_lat * celestial_sin_lat)\n + (cos_lat * celestial_cos_lat * cos_d_lon))\n\n phi = np.fmod(phi, two_pi)\n\n return theta, phi", "def get_packed(self, use_sqrt=None):\n\n if (use_sqrt is None) or (use_sqrt == self.use_sqrt):\n return self._params\n\n pa = self._params.copy()\n cov_re = self.get_cov_re()\n\n if use_sqrt:\n L = np.linalg.cholesky(cov_re)\n pa[self.k_fe:] = L[self._ix]\n else:\n pa[self.k_fe:] = cov_re[self._ix]\n\n return pa", "def phase_center(self):\n try:\n rx_number = extract_channel_number(self.title)\n ph_center = (_np.array(self.GPRI_tx_coord) + _np.array(\n getattr(self, \"GPRI_rx{num}_coord\".format(num=rx_number)))) / 2\n return ph_center\n except AttributeError:\n return 0", "def cart2pol(x, y): \n\n\trho = np.sqrt(x**2 + y**2)\n\tphi = np.arctan2(y, x)\n\n\treturn rho, phi", "def compute_projections(self, alpha_phi, alpha_theta):\n self.find_pixels()\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n 
self.compute_reference_sphere(x,y)", "def two_body(lat, h, psi_r, psi_i):\n\n #minus sign is to eliminate the hopping constant\n h_backwards = -np.tril(h)\n h_backwards[0, -1] = h[0, -1]\n h_backwards[-1, 0] = 0.\n #calculates the first expectation value in the commutator, contracting from right to left\n psi_new = one_elec(lat, h_backwards, psi_r, False) + 1.j*one_elec(lat, h_backwards, psi_i, False)\n psi_new = two_elec(lat, psi_new.real, psi_new.imag)\n expectation1 = np.dot((psi_r+1j*psi_i).conj(), psi_new)\n\n h_forwards = -np.triu(h)\n h_forwards[-1, 0] = h[-1, 0]\n h_forwards[0, -1] = 0.\n #calculates the second expectation value in the commutator, contracting from right to left\n psi_new = two_elec(lat, psi_r, psi_i)\n psi_new = one_elec(lat, h_backwards, psi_new.real, False) + 1.j*one_elec(lat, h_backwards, psi_new.imag, False)\n expectation2 = np.dot((psi_r+1j*psi_i).conj(), psi_new)\n\n return expectation1 - expectation2", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. / 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def getplatepos(self, phi=0, chi=0, omega=0):\n\n #Save the specified angles in the structure\n angles = np.array([phi, chi, omega]);\n\n #We divvy up the phi rotation between the plate and the sample motor.\n #We round to the nearest multiple of the sample motor step size.\n self.sample_motor_phi = round(phi / self.sample_motor_step_size) * self.sample_motor_step_size\n #And the remainder is handled by the sample plate position.\n sample_plate_phi = phi - self.sample_motor_phi\n\n #This calculates the rotation matrix for the sample PLATE only.\n rot_M_plate = rotation_matrix(sample_plate_phi, chi, omega)\n\n #And this is the rotation matrix for the sample motor only\n rot_M_motor = rotation_matrix(self.sample_motor_phi, 0, 0)\n\n\n #X,Y,Z translation vector (in mm) to perform BEFORE moving the sample plate.\n #To calculate these, we use the relative_sample_position vector.\n translate_v = -self.relative_sample_position\n #But we have to correct for the sample motor phi rotation by rotating the translation\n #vector as well.\n translate_v = np.dot(rot_M_motor, translate_v)\n \n\n #------------------ SAMPLE PLATE ----------------------\n #3 vectors representing the position of the mounting points on the plate,\n #when it is horizontal and with the sample at 0\n #Remember, the plate is in the X-Z plane.\n\n #distance between center of plate and each mounting point.\n d = self.mounting_side_length / (2 * np.cos(pi / 6))\n #Distance to the edge on the other side\n d2 = np.sin(pi / 6) * d\n\n #Vectors representing the sample plate at the \"zero\" position.\n sample_plate_zero = np.column_stack(([self.mounting_side_length / 2, self.sample_plate_height, d2],\n [-self.mounting_side_length / 2, self.sample_plate_height, d2],\n [0, self.sample_plate_height, -d]))\n\n #------------------ OTHER USEFUL POINTS ----------------------\n #Vector representing the position of the middle of the sample plate.\n sample_middle = column([0, self.sample_plate_height, 0])\n\n #Make a vector representing the position of the sample at the end of the\n #pin.\n pin = self.relative_sample_position\n\n #Make vector to represent the sample motor orientation (at zero)\n self.motor_vector_length = 20\n motor = column([0, self.sample_plate_height, 
self.motor_vector_length])\n\n\n #------------------ APPLY TRANSFORMS ----------------------\n #For the sample plate: we do not apply the motor_phi rotation.\n \n #Do a translation of the position - we are moving the entire sample plate\n # This places the sample in the 0,0,0 position.\n sample_plate = get_translated_vectors(sample_plate_zero, translate_v)\n\n #Now do a rotation (phi,chi,omega)\n sample_plate = dot(rot_M_plate, sample_plate)\n\n #The pin rotates with the motor, then translates, then then rotates with the\n #sample plate.\n pin = dot(rot_M_motor, pin)\n pin = get_translated_vectors(pin, translate_v)\n pin = dot(rot_M_plate, pin)\n\n #Motor vector = same as pin.\n motor = dot(rot_M_motor, motor)\n motor = get_translated_vectors(motor, translate_v)\n motor = dot(rot_M_plate, motor)\n\n #Same for the sample_middle vector\n sample_middle = dot(rot_M_motor, sample_middle)\n sample_middle = get_translated_vectors(sample_middle, translate_v)\n sample_middle = dot(rot_M_plate, sample_middle)\n\n #Sample plate coordinates are:\n #i.e. x_A2, y_A2, x_B2, etc. (as written in Janik's notebook)\n\n #We want to find the positions of the other ends of the legs on the fixed\n #plate, x_A1, etc.\n fixed_plate = np.copy(sample_plate)\n\n #Legs A and B are fixed in their orientation along Z, and C along X, so we\n #know the Z_A1, Z_B1 and X_C1 positions on the FIXED plate are the same as\n #on the SAMPLE plate.\n\n #We also know the height of all these points, y = fixed_plate_height.\n fixed_plate[COORD_Y, :] = self.fixed_plate_height\n \n #This leaves x_A1, x_B1, and z_C1 to find.\n\n #Angle between the x direction and the (A1 to A2) vector formed by leg A\n theta_A = np.arcsin((sample_plate[COORD_Y, MOUNT_A] - self.fixed_plate_height) / self.leg_length)\n if theta_A > -pi / 2:\n #Force theta_A to be ~-120 degrees\n theta_A = -pi - theta_A\n \n\n #Angle between the x direction and the B1 to B2) vector formed by leg B\n theta_B = np.arcsin((sample_plate[COORD_Y, MOUNT_B] - self.fixed_plate_height) / self.leg_length)\n\n #We can easily calculate the x position from these\n x_A1 = sample_plate[COORD_X, MOUNT_A] - self.leg_length * cos(theta_A)\n x_B1 = sample_plate[COORD_X, MOUNT_B] - self.leg_length * cos(theta_B)\n\n fixed_plate[COORD_X, MOUNT_A] = x_A1\n fixed_plate[COORD_X, MOUNT_B] = x_B1\n\n\n #Finally we find the position of Leg C\n phi_C = np.arcsin((sample_plate[COORD_Y, MOUNT_C] - self.fixed_plate_height) / self.leg_length)\n if phi_C < -pi / 2:\n #Force phi_C to be ~-60 degrees\n phi_C = 2*pi + phi_C\n\n #Now we calc. 
the Z position of leg C on the fixed plate.\n z_C1 = sample_plate[COORD_Z, MOUNT_C] - self.leg_length * cos(phi_C)\n fixed_plate[COORD_Z, MOUNT_C] = z_C1\n\n\n #Assign these plate position in the goniometer object, which is returned\n self.sample_plate = sample_plate\n self.fixed_plate = fixed_plate\n self.sample_plate_zero = sample_plate_zero\n\n #Also return the pin and motor vectors\n self.pin = pin\n self.motor = motor\n self.sample_middle = sample_middle", "def cart2pol(x, y):\n rho = np.sqrt(x ** 2 + y ** 2)\n phi = np.arctan2(y, x)\n return rho, phi", "def getGlobalPosition_Racing(self, ex, ey, xd, yd, psid):\n\n # x = ex*np.cos(psid) - ey*np.sin(psid) + xd\n x = xd\n y = (ey - xd*np.sin(psid) + yd*np.cos(psid) + x*np.sin(psid)) / np.cos(psid)\n\n return x, y", "def get_initial_params(self, x, y, yerr):\n ampl = y[0]\n offset = 0\n tau = log(y[-1] / float(y[0])) / (x[-1] - x[0])\n if self.amplitude != None:\n p0 = array([tau])\n else:\n if self.offset:\n p0 = array([tau, ampl, offset])\n else:\n p0 = array([tau, ampl])\n return p0", "def get_parameters(self): \n audio_emotions_topic = rospy.get_param(\"~audio_emotions_topic\")\n gcp_name = rospy.get_param(\"~gcp_name\")\n gcp_project = rospy.get_param(\"~gcp_project\")\n gcp_version = rospy.get_param(\"~gcp_version\")\n json_path = rospy.get_param(\"~json_path\")\n model_path = rospy.get_param(\"~model_path\")\n emotions_logfile = rospy.get_param(\"~emotions_logfile\")\n robot_ip = rospy.get_param(\"~robot_IP\")\n s2t_topic = rospy.get_param(\"~s2t_topic\")\n pred_mode = rospy.get_param(\"~pred_mode\")\n raw_audio_topic = rospy.get_param(\"~raw_audio_topic\")\n dest_num_channels = rospy.get_param(\"~dest_num_channels\")\n dest_rate = rospy.get_param(\"~dest_rate\")\n max_iter = rospy.get_param(\"~max_iter\") \n sound_path = rospy.get_param(\"~sound_path\")\n wav_topic = rospy.get_param(\"~wav_topic\")\n stats_logfile = rospy.get_param(\"~stats_logfile\")\n stats_topic = rospy.get_param(\"~stats_topic\")\n return (audio_emotions_topic, gcp_name, gcp_project, gcp_version, json_path, model_path, emotions_logfile, robot_ip, s2t_topic, pred_mode, raw_audio_topic, dest_num_channels, dest_rate, max_iter, sound_path, wav_topic, stats_logfile, stats_topic)", "def spinex_phi(infile, sequence):\n return np.loadtxt(infile, usecols=3, skiprows=1).reshape((1, -1, 1))", "def get_pv_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n pv1 = np.zeros((40,), dtype=np.float64)\n pv2 = np.zeros((40,), dtype=np.float64)\n for k in range(40):\n pv1[k] = header.get('PV1_%d' % k, 0.0)\n pv2[k] = header.get('PV2_%d' % k, 0.0)\n return cd, pv1, pv2", "def osp2():\n return dict(\n kloc= range(75,125),\n docu = [3,4], ltex = [2,5],\n sced = [2,3,4], Pmat = [4,5],\n Prec = [3,4, 5],\n Resl = [4], Team = [3],\n acap = [4], aexp = [4],\n cplx = [4], data = [4],\n Flex = [3], pcap = [3],\n pcon = [3], pexp = [4],\n pvol = [3], rely = [5],\n ruse = [4], site = [6],\n stor = [3], time = [3],\n tool = [5])", "def getPhi(mass,resonance):\n return numpy.arctan((resonance.r0*resonance.w0)/(mass**2-resonance.w0**2)) #need to make this arccotan? 
invert args", "def _get_init_pose(self):\n return self.init_pose_R, self.init_pose_t", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def setup_parser(self) -> Dict[str, Any]:\n\n\t# % year,doy, RH(m), Month, day, azimuth(deg),freq, satNu, LSP amp,pk2noise,UTC(hr) \n\t# 2021 9 4.888 1 9 225.3 1 2 9.51 3.23 10.08\n\t# 2021 9 5.018 1 9 181.3 1 15 7.79 2.84 15.67\n\t# 2021 9 5.123 1 9 185.4 1 16 6.27 3.01 0.68\n #----+----0----+----1----+----2----+----3----+----4----+----5----+----6----+----7\n return dict(\n skip_header=1,\n names=(\n \"year\",\n \"doy\",\n \"reflection_height\",\n \"month\",\n \"day\",\n \"azimuth\",\n \"frequency\",\n \"satellite\",\n \"amplitude\",\n \"peak2noise\",\n \"hour\",\n ),\n dtype=(\"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\"),\n )", "def _primary_beam(self, hdr):\n # Called ApPrimaryNano in OpenMIMS\n d = {}\n start_position = hdr.tell()\n d['source'], d['current start'], d['current end'], d['Lduo'], d['L1'] = \\\n unpack(self._bo + '8s 4i', hdr.read(24))\n\n # Each widths list is 10 ints long\n d['Dduo'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['Dduo widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n d['D0'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['D0 widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n d['D1'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['D1 widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n\n # 4 bytes unused\n hdr.seek(4, 1)\n d['raster'], d['oct45'], d['oct90'], d['E0P'], \\\n d['pressure analysis chamber'] = \\\n unpack(self._bo + '4d 32s', hdr.read(64))\n\n d['source'] = self._cleanup_string(d['source'])\n d['pressure analysis chamber'] = self._cleanup_string(d['pressure analysis chamber'])\n\n if self.header['analysis version'] >= 3:\n d['L0'] = unpack(self._bo + 'i', hdr.read(4))[0]\n if self.header['analysis version'] >= 4:\n d['hv cesium'], d['hv duo'] = unpack(self._bo + '2i', hdr.read(8))\n # DCs not in OpenMIMS; only in certain release/version?\n d['Dcs'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['Dcs widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n\n # skip bytes until total read in this function is 552\n # OpenMIMS: size_Ap_primary_nano = 552\n # Newer versions have rest filled with \\xCC continuation bytes, but\n # older versions have null-bytes, but not all bytes are null!!\n # The numbers do not seem to represent anything, though, so can be skipped.\n hdr.seek(start_position + 552)\n return d", "def cart2pol(x,y):\r\n th = np.angle(x+1j*y)\r\n rho = np.abs(x+1j*y)\r\n \r\n return th, rho", "def get_projection(attrs):\n df = load_df()\n\n X = get_all_vectors(df, attrs)\n 
logger.info('- Data shape original: {}'.format(X.shape))\n\n X = X if isinstance(X, np.ndarray) else X.toarray()\n X = dimension_reduction(X, attrs['decomposition'], attrs['distanceMetric'])\n return X, df", "def detail_matching(self):\n paradic = self.cfg['param']['paradic']\n work_dir = self.work_dir\n \n x = float(self.cfg['param']['x']) # selected pixel in the first image\n y = float(self.cfg['param']['y'])\n \n # sift parameters\n # number of bins in the orientation histogram\n n_bins = int(paradic['n_bins']) \n n_hist = int(paradic['n_hist']) \n # descriptor of n_hist X n_hist weighted histograms with n_ori\n n_ori = int(paradic['n_ori']) \n delta_min = float(paradic['delta_min'])\n sigma_min = float(paradic['sigma_min'])\n sigma_in = float(paradic['sigma_in'])\n lambda_ori = float(paradic['lambda_ori'])\n lambda_descr = float(paradic['lambda_descr'])\n #threshold defining reference orientations\n n_spo = int(paradic['n_spo'])\n \n # Read feature vectors from output files\n if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):\n pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)\n \n illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)\n\n \n # Read keys coordinates.\n d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata\n v = n_hist*n_hist*n_ori\n [x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]\n [o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]\n [x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]\n [o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]\n [x2b, y2b, sigma2b, theta2b] = \\\n [float(x) for x in pairdata[2*d:2*d+4]]\n [o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]\n \n draw_one_match(pairdata,\n work_dir+'input_0.png',\n work_dir+'input_1.png',\n d,\n lambda_ori,\n lambda_descr,\n n_hist,\n work_dir+'OUTonepair.png')\n \n \n # Extract thumbnails.\n # keypoint 1 (image 1)\n print ' '.join(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n self.wait_proc(proc, timeout=self.timeout)\n \n # keypoint 2a (nearest neighbor in image 2)\n print ' '.join(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n # keypoint 2b (second nearest neighbor in image 2)\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2b\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n \n return 1", "def 
extract_calib_info(fname):\n\n # read in the text file\n f = open(fname, 'r')\n lines = f.readlines()\n\n # reading criteria\n k1 = 'fitting method'\n k2 = 'function evals'\n k3 = 'data points'\n k4 = 'Bayesian info crit'\n k5 = ' (' # calibrated parameters\n k6 = '(init' # calibrated parameters\n k7 = '+/-' # calibrated parameters\n k8 = ':' # calibrated parameters\n k9 = '(fixed' # calibrated parameters\n k10 = '==' # calibrated parameters\n\n # info to keep\n info = [e.split('=') if (k1 in e) else [e.split('=')[1]] if ((k2 in e) or\n (k3 in e) or (k4 in e)) else\n [(e.split(k6)[0].split(k5)[0].split(k7)[0].split(k8)[0]),\n (e.split(k6)[0].split(k5)[0].split(k7)[0].split(k8)[1]),\n e.split(k6)[0].split(k5)[0].split(k7)[1]] if (k7 in e) else\n [e.split(k6)[0].split(':')[0], e.split(k6)[0].split(':')[1], 'nan']\n if (k6 in e) else [e.split(k9)[0].split(':')[0],\n e.split(k9)[0].split(':')[1], 'nan']\n if (k9 in e) else [e.split(k10)[0].split(':')[0],\n e.split(k10)[0].split(':')[1], 'nan']\n if (k10 in e) else [''] for e in lines]\n\n # remove end lines and formatting issues\n info = [e.strip('\\n') for sub in info for e in sub if e != '']\n info = [e.replace(' ', '') if (':' in e) else e.strip() for e in info]\n\n # split into sublists containing each solver's info\n info = [list(sub) for e, sub in groupby(info, lambda x: k1 not in x) if e]\n\n return info", "def _position_spherical2cylindrical(pos):\n \n\n r=pos[:,0]\n theta_spherical=pos[:,1]\n phi_spherical=pos[:,2]\n\n if any(theta_spherical>np.pi) or any(theta_spherical<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n rho=r*np.sin(theta_spherical)\n theta_cylindrical=phi_spherical\n z=r*np.cos(theta_spherical)\n\n return np.dstack((rho,theta_cylindrical,z))[0]", "def cart2spheric(x, y, z):\n # doesn't compute r because chosen egal to 1\n with np.errstate(all='ignore'):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n\n return theta, phi", "def psi_packet_k(pos=x1,angular_f=omega1,time=t1,phi_dic=example_phik()):\n\n phi_var = phi_dic['var']\n phi_par = 1, np.pi, \n phi_y = phi_dic['y']\n\n psi_var = sy.var('x omega t i')\n psi_par = pos, angular_f, time, sy.I\n\n # var = phi_var + psi_var\n # par = phi_par + psi_par\n var = psi_var\n par = psi_var\n\n k = sy.symbols('k')\n y1 = phi_y * sy.exp( i * (k*x - omega*t) )\n\n return y1\n\n # def integrand(k,x,omega,t):\n # return phi(k) * np.exp( 1j * (k*x - omega*t) )\n #\n # I = si.quad(integrand, -np.inf, np.inf, args=(x,omega,t) )\n #\n # return 1/np.sqrt( 2 * np.pi ) * ( I[0] - I[1] )", "def parse_prnu_file():\n hdf_name = r'C:\\Users\\nmishra\\Workspace\\TEMPO\\PRNU_map\\\n batch_2017Jun20_TEMPO_PRNU_-20Tccd__46Tfpe_3pixSpectral_3pixSpatial.h5'\n file = h5py.File(hdf_name, 'r')\n prnu = file.get('prnu')\n prnu = np.array(prnu).transpose()\n quad_d = prnu[2:1030, 10:1034]\n quad_c = prnu[2:1030, 1078:2102]\n quad_a = prnu[1062:2090, 10:1034]\n quad_b = prnu[1062:2090, 1078:2102]\n prnu_map_lower = np.concatenate((quad_d, quad_c), axis=1)\n prnu_map_upper = np.concatenate((quad_a, quad_b), axis=1)\n prnu_map = np.concatenate((prnu_map_lower, prnu_map_upper), axis=0)\n return prnu_map", "def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')", "def find_header_info(file):\n\n hdr = pyfits.getheader(file, 1)\n obsid = hdr['OBS_ID']\n detnam = hdr['DETNAM']\n date_obs = 
hdr['DATE-OBS']\n date_end = hdr['DATE-END']\n tstart = hdr['TSTART']\n tstop = hdr['TSTOP']\n ra_pnt = hdr['RA_PNT']\n dec_pnt = hdr['DEC_PNT']\n roll_pnt = hdr['ROLL_PNT']\n defocus = hdr['DEFOCUS']\n foc_len = hdr['FOC_LEN']\n ra_nom = hdr['RA_NOM']\n dec_nom = hdr['DEC_NOM']\n sim_x = hdr['SIM_X']\n sim_y = hdr['SIM_Y']\n sim_z = hdr['SIM_Z']\n\n return [obsid, detnam, date_obs, date_end, tstart, tstop, ra_pnt, dec_pnt, ra_nom, dec_nom, roll_pnt, foc_len, defocus, sim_x, sim_y, sim_z]", "def skyrmion_m_field(self, pos, sign,\n sk_pos=None, sk_r=4, core=1, pi_factor=1.,\n out_skyrmion_dir=None\n ):\n\n if sk_pos is None:\n # We assume a square sized hexagonal mesh so the centre\n # is at half of every dimension\n sk_pos = self.sim.mesh.Lx * 0.5, self.sim.mesh.Ly * 0.5\n\n x = (pos[0] - sk_pos[0])\n y = (pos[1] - sk_pos[1])\n\n if np.sqrt(x ** 2 + y ** 2) <= sk_r:\n # Polar coordinates:\n r = (x ** 2 + y ** 2) ** 0.5\n phi = np.arctan2(y, x)\n # This determines the profile we want for the skyrmion\n # Single twisting: k = pi / R\n k = pi_factor * np.pi / sk_r\n\n # We define here a 'hedgehog' skyrmion pointing down\n return (sign * np.sin(k * r) * np.cos(phi),\n sign * np.sin(k * r) * np.sin(phi),\n core * np.cos(k * r))\n else:\n if not out_skyrmion_dir:\n return (0, 0, -core)\n else:\n return out_skyrmion_dir", "def _ps_fields(cls):\n return (\n ('exp_version', '', 'S16'),\n ('ad_observed', 0, 'b1'),\n ('is_stim', 0, 'b1')\n )", "def _read_params_txt(self) -> dict:\n df = pd.read_csv(self.file_path, sep=\" \", header=None, index_col=0).T\n\n sources_info = {\n \"sample_rate\": float(df[\"samplerate\"].iloc[0]),\n \"data_format\": df[\"dataformat\"].str.replace(\"'\", \"\").iloc[0],\n \"n_samples\": None,\n \"path\": self.file_path,\n }\n\n return sources_info", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def get_pars(self):\n return [self.z, self.b, self.logN]", "def project_p2c_image(src, H): #---- project p to c (whole image)\r\n Z = H[2]; phi= H[3]; S= H[4]; TV= H[5]; TU= H[6];\r\n rows= src.shape[0]; cols= src.shape[1]; # get image size info\r\n diag= np.sqrt(rows**2+cols**2); # diagnol length\r\n radi= int(diag*S/2*1.1); # radius of new plot should be larger\r\n dest= np.zeros((radi*2,radi*2,3)) # projection result\r\n cosf= np.cos(phi); sinf= np.sin(phi); # rotation parameters\r\n u0 = radi-(TU-np.floor(TU)); # only process fractional part\r\n v0 = radi-(TV-np.floor(TV)); # of TU and TV\r\n kv = np.arange(0,radi*2); # \r\n #--- ---\r\n srcx= src.copy();\r\n srcx[0,:,:]=0; srcx[rows-2:rows,:,:]=0; \r\n srcx[:,0,:]=0; srcx[:,cols-2:cols,:]=0;\r\n #--- mapping ---\r\n for ku in range(0,radi*2): # scan each column\r\n UP = (ku-u0)/S; VP= (kv-v0)/S; # correct tu,tv,s\r\n RP =-sinf*UP + cosf*VP;\r\n CP = cosf*UP + sinf*VP; # correct rotation phi\r\n theta= CP/Z; # horizontal angle\r\n C = Z*np.tan(theta) + cols/2;\r\n R = RP/np.cos(theta) + rows/2;\r\n #--- interpolation ---\r\n C = np.minimum(np.maximum(C, 0), cols-2);\r\n R = np.minimum(np.maximum(R, 0), rows-2); \r\n C0 = np.floor(C).astype(int); C1= C-C0; \r\n R0 = np.floor(R).astype(int); R1= R-R0; \r\n for m in range(0,3):\r\n pixel = srcx[R0 ,C0 ,m]*(1-R1)*(1-C1);\r\n pixel+= srcx[R0 ,C0+1,m]*(1-R1)*( C1);\r\n pixel+= srcx[R0+1,C0 ,m]*( R1)*(1-C1);\r\n pixel+= srcx[R0+1,C0+1,m]*( R1)*( C1);\r\n dest[kv,ku,m]= pixel; \r\n return dest", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def set_params_proj(ima, p, xform = \"xform.projection\"):\n\tfrom EMAN2 import Vec2f\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2]})\n\tt.set_trans(Vec2f(-p[3], -p[4]))\n\tima.set_attr(xform, t)", "def parameters(self):\n return self.pars", "def getCoords(self,string=False,parse=False):\n if string:\n RA = self.header['RA']\n dec = self.header['DEC']\n return RA,dec\n elif parse:\n RA = tuple(map(float,self.header['RA'].split(\":\")))\n dec = tuple(map(float,self.header['DEC'].split(\":\")))\n return RA,dec\n return coordinates.SkyCoord(\"%s %s\"%(self.header['RA'],self.header['DEC']),unit=(units.hourangle,units.degree))", "def setup_parser(self) -> Dict[str, Any]:\n\n\n # % GALAT - SPP Single Point Positioning\n # % -------------------------------------\n # % Processing Option\n # % ------------------\n # % GNSS system(s) : GALILEO\n # % Orbit type : Broadcast - INAV\n # % Solution type : SPP\n # % Frequency : E1\n # % Elevation mask : 5.0 deg\n # % Time interval : 30.0 s\n # % Ionosphere opt : NeQuick-G\n # % Troposhere opt : GMF with GPT\n # % Obs start : 2020/01/04 00:00:00.0 GPST (week 2086 518400.0s)\n # % Obs end : 2020/01/04 23:59:30.0 GPST (week 2086 604770.0s)\n # % Epoch expected : 2880\n # % Epoch have : 2880\n # %\n # % Input file(s) : KOUG00GUF_R_20200040000_01D_30S_MO.rnx\n # % Input file(s) : CNES0030.20L\n # % Input file(s) : 
CNES0040.20L\n # % Input file(s) : igs14.atx\n # %\n # % RINEX header info\n # % ------------------\n # % Marker : KOUG 97301M402\n # % Receiver T/V/# : SEPT POLARX5TR 5.3.0 17323022503\n # % Antenna T/ /# : LEIAR25.R3 LEIT 10180007\n # % Position XYZ : 3855263.3407 -5049731.9986 563040.4252\n # % Antenna H/E/N : 0.0000 0.0000 0.0000\n self._parse_header()\n\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+--\n # 2020/01/04 00:00:00 5.098466365 -52.639742999 106.8901 -0.603 -0.821 -0.349 1.018 0.349 \n # 2020/01/04 00:00:30 5.098466094 -52.639742684 107.4962 -0.633 -0.856 0.257 1.065 0.257 \n # 2020/01/04 00:01:00 5.098466030 -52.639740961 107.6125 -0.640 -1.047 0.373 1.228 0.373 \n return dict(\n names=(\n \"yyyymmdd\", \n \"hhmmss\", \n \"latitude\", \n \"longitude\", \n \"height\", \n \"dlatitude\", \n \"dlongitude\", \n \"dheight\",\n \"hpe\",\n \"vpe\",\n \"site_vel_3d\",\n \"pdop\",\n \"num_satellite_available\",\n \"num_satellite_used\",\n ),\n comments=\"%\",\n delimiter=(10, 9, 15, 15, 10, 9, 9, 9, 9, 9, 9, 6, 4, 4),\n dtype=(\"U10\", \"U9\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\"),\n autostrip=True,\n )", "def MakeParams(params):\n if params['gs_model'] == system_types.kGroundStationModelTopHat:\n gps_primary_antenna_dir = [0.0, 0.0, -1.0]\n gps_primary_pos = [1.418, -1.657, -2.417]\n\n # TopHat doesn't actually have a secondary gps.\n gps_secondary_antenna_dir = gps_primary_antenna_dir\n gps_secondary_pos = gps_primary_pos\n\n # Angle [rad] from the GPS compass baseline to the zero-azimuth\n # reference of the perch frame. Note: The TopHat does not have a\n # GPS compass, but this value is set for historical consistency.\n gps_compass_to_perch_azi = -2.440\n\n elif params['gs_model'] == system_types.kGroundStationModelGSv1:\n gps_primary_antenna_dir = [0.0, 0.0, -1.0]\n # Position measured on 2015-06-15.\n gps_primary_pos = [0.0, 0.0, -2.94]\n # GSv1 doesn't actually have a secondary gps.\n gps_secondary_antenna_dir = gps_primary_antenna_dir\n gps_secondary_pos = gps_primary_pos\n\n # Angle [rad] from the GPS compass baseline to the zero-azimuth\n # reference of the perch frame\n gps_compass_to_perch_azi = -2.440\n\n elif params['gs_model'] == system_types.kGroundStationModelGSv2:\n gps_primary_antenna_dir = [0.0, 0.0, -1.0]\n gps_secondary_antenna_dir = [0.0, 0.0, -1.0]\n if params['test_site'] == system_types.kTestSiteParkerRanch:\n # See b/137283974 for details.\n gps_primary_pos = [-0.002, 0.011, -6.7]\n gps_secondary_pos = [-2.450, -0.428, -6.827]\n elif params['test_site'] == system_types.kTestSiteNorway:\n # See b/137660975 for details.\n gps_primary_pos = [-0.002, 0.011, -6.7]\n gps_secondary_pos = [-2.450, -0.428, -6.757]\n else:\n assert False, 'Unsupported test site.'\n # Angle [rad] from the GPS compass baseline to the zero-azimuth\n # reference of the platform frame. 
See b/118710931.\n gps_compass_to_perch_azi = np.deg2rad(169.84)\n\n else:\n assert False, 'Unsupported ground station model.'\n\n return {\n # Position [m] of the GS GPS antenna in the platform frame.\n # NOTE: The direction of the antennae is currently not used.\n 'primary_antenna_p': {\n 'antenna_dir': gps_primary_antenna_dir,\n 'pos': gps_primary_pos,\n },\n 'secondary_antenna_p': {\n 'antenna_dir': gps_secondary_antenna_dir,\n 'pos': gps_secondary_pos,\n },\n\n # Calibration for the ground station compass ([#], [rad], [#]).\n # The bias is used to account for the angle between the perch\n # frame and the NovAtel differential GPS receiver.\n # TODO: Remove this parameter once the computation of\n # compass heading from the primary and secondary antennae is implemented.\n 'heading_cal': {\n 'scale': 1.0, 'bias': gps_compass_to_perch_azi, 'bias_count': 0}\n }", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def reformat_pose_to_dict(self, now_pose):\n # now_pose è un dict in particolare { pose : [ {position : [{x : value , y:value , z:value} ] } , {orientation : [] } }\n # devo convertire i quaternioni in amgoli di eulero...estrarre i quaternioni da pose_now e convertirli in angoli RPY\n\n lato_corto_2 = 1.65 #1.45 # offset parcheggio\n \n #correggo gli offset x centrare le macchine nei parcheggi\n\n if abs(round(now_pose.position.x,2)) == 22.45:\n if now_pose.position.x < 0 :\n now_pose.position.x+=lato_corto_2\n now_pose.position.y-=0.4\n else :\n now_pose.position.x-=lato_corto_2\n now_pose.position.y+=0.4\n \n if abs(round(now_pose.position.y,2)) == 22.45:\n if now_pose.position.y < 0 :\n now_pose.position.y+=lato_corto_2\n now_pose.position.x+=0.4\n else :\n now_pose.position.y-=lato_corto_2\n now_pose.position.x-=0.4\n\n # correggo la z per renderla uguale all'asfalto che viene spownata nel mondo\n\n offset_asfalto = 0.3\n\n x = now_pose.position.x\n y = now_pose.position.y\n z = now_pose.position.z + offset_asfalto\n\n q1 = now_pose.orientation.x\n q2 = now_pose.orientation.y\n q3 = now_pose.orientation.z\n q4 = now_pose.orientation.w\n\n\n # converto i quaternioni in angoli di rulero RPY in radianti\n orientation_list = [q1,q2,q3,q4]\n\n euler = euler_from_quaternion( orientation_list )\n roll = euler[0]\n pitch = euler[1]\n yaw = round(euler[2],2) + np.pi\n\n\n # creo la lista dei parametri che mi servono nel campo pose:[] del file .yaml\n\n lista_parametri = [x ,y ,z ,roll ,pitch ,yaw ]\n\n # creo un dict con tutti i campi di cui ho bisogno nel file .yaml\n # settare le chiavi 'name' , ' type ' , 'package' , ' pose ' secondo le proprie necessità\n # i due stili sono equivalenti : usare quello preferito\n \"\"\"\n {\"name\" : \"park1\" , \n \"type\" : \"sdf\" , \n \"package\" : \"object_spawner\" , \n \"pose \":self.seq(lista_parametri) \n }\n \n \"\"\"\n lista_veicoli = ['macchina','pickup','ferrari','prius_hybrid','car_lexus','car_polo','car_volvo','car_golf']\n num_veicoli = 1\n\n #modificare qui implementando una funzione randomica se si vogliono piu veicoli casuali spawnati\n elemento_lista = {'name' : lista_veicoli[3],\n 'type': 'sdf',\n 'package': 'object_spawner',\n 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n #\"\"\"\n # elemento_lista = {'name' : 'ferrari',\n # 'type': 'urdf',\n # 'package': 'autopark',\n # 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n\n return elemento_lista" ]
[ "0.61034876", "0.60535675", "0.6002323", "0.55047", "0.53491384", "0.53456676", "0.5342712", "0.5333783", "0.5266437", "0.52358764", "0.52249706", "0.5206061", "0.5192297", "0.51854604", "0.5182782", "0.51370823", "0.51352656", "0.512739", "0.5105829", "0.50973487", "0.5085923", "0.5063176", "0.5059661", "0.5044287", "0.50372756", "0.5009923", "0.4982236", "0.49807766", "0.4974874", "0.49621952", "0.49576318", "0.49536368", "0.49446818", "0.4935566", "0.4934705", "0.49314937", "0.49223456", "0.49205387", "0.49038804", "0.48826665", "0.48767892", "0.48767516", "0.4865179", "0.4862632", "0.4821575", "0.48099375", "0.48091152", "0.4787045", "0.47816393", "0.4775132", "0.47706854", "0.47700006", "0.47693446", "0.47692135", "0.47653246", "0.47587967", "0.47581056", "0.4754437", "0.47418368", "0.47378832", "0.4736188", "0.47305557", "0.47284186", "0.47239423", "0.4720313", "0.47197944", "0.47195452", "0.47186023", "0.47002107", "0.46991056", "0.46961975", "0.469586", "0.46885586", "0.46826074", "0.46788505", "0.46761316", "0.46690562", "0.4668401", "0.46682835", "0.46670547", "0.4660715", "0.46581092", "0.46578318", "0.46538618", "0.46537203", "0.46507075", "0.46494615", "0.46481356", "0.46481013", "0.46435586", "0.46386766", "0.46386766", "0.46341792", "0.46316293", "0.46303368", "0.46260753", "0.4623719", "0.462122", "0.462122", "0.46188775" ]
0.6081615
1
set projection alignment parameters in the header phi theta psi s2x s2y
def set_params_proj(ima, p, xform = "xform.projection"): from EMAN2 import Vec2f t = Transform({"type":"spider","phi":p[0],"theta":p[1],"psi":p[2]}) t.set_trans(Vec2f(-p[3], -p[4])) ima.set_attr(xform, t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def set_phi(self,phi):\n\t\tr=self.r\n\t\tself.x = np.cos(np.deg2rad(phi))*r\n\t\tself.y = np.sin(np.deg2rad(phi))*r", "def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })", "def get_params_proj(ima, xform = \"xform.projection\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],-d[\"tx\"],-d[\"ty\"]", "def map_sim_positions(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, ax1 = plt.subplots(figsize=(10,10))\n # p.gal_index = np.where(GR.file_name == 'z0.00_G7169_cG29270')[0][0]\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n # print('TEST!',gal_ob.file_name,p.gal_index)\n simdata = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n\n # Plot\n print(simdata.head())\n ax1.plot(simdata.x,simdata.y,'o',ms=2,mew=2)\n\n print(gal_ob.radius)\n # Limit axes limits a bit to avoid area with no particles...\n # ax1.set_xlim([-2/3*gal_ob.radius,2/3*gal_ob.radius])make_projec\n # ax1.set_ylim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')", "def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is 
approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p", "def set_physical_params(self, params):\n self.M500 = params[0]\n self.r500 = params[1]\n self.z = params[2]", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def __prepare_dh_params(self):\n self.alpha = symbols('alpha0:' + str(self.joint_count))\n self.a = symbols('a0:' + str(self.joint_count))\n self.q = symbols('q1:' + str(self.joint_count + 1))\n self.d = symbols('d1:' + str(self.joint_count + 1))", "def setInstrumentParameters(self, instrpars):\n\n # A reference to the primary header is also required.\n\n for p in self.assoc.parlist:\n p['image'].setInstrumentParameters (instrpars, p['exposure'].header)", "def __init__(self, params, print_df=True, print_help=False):\n stellar_type, position, parallax, proper_motion, v_radial = params\n self.init_params = params\n self.stellar_type = stellar_type\n self.proper_motion = proper_motion # [mas/year, mas/year]\n self.distance = 1/parallax # parsecs\n self.parallax = parallax # arcsecs\n self.position = position # [hms, dms]\n self.v_radial = v_radial # km/s\n\n self.galactic_coords = radec_to_galactic(self.position) # 
degrees\n\n # Proper motion, described in Cartesian components\n self.pm_dec = self.proper_motion[1]\n # We don't need to scale by cos(dec) because the units are already in mas/year\n self.pm_ra = self.proper_motion[0] #* np.cos(self.pm_dec)\n\n # Proper motion, described in angular components\n self.pm_mag = np.sqrt(self.pm_ra**2 + self.pm_dec**2) # mas/year\n # PA = angle east of north\n self.pm_posang = round(np.arctan(self.pm_ra/self.pm_dec), 4) # radians\n\n self.v_transverse = 4.74 * self.pm_mag * self.distance # km/s\n\n # Space velocity is the third leg of the v_trans/v_rad triangle.\n self.v_space = np.sqrt(self.v_transverse**2 + self.v_radial**2)\n\n star_obj = SkyCoord(Angle(position[0]), Angle(position[1]), frame='icrs')\n self.constellation = get_constellation(star_obj)\n\n self.d_from_GC = self.distance_to_galactic_center() # parsecs\n self.closer = True if self.d_from_GC > d_sun_GC else False\n\n d = [{'Name': 'Stellar Type', 'Value': self.stellar_type, 'units': 'N/A'},\n {'Name': 'Distance', 'Value': self.distance, 'units': 'parsec'},\n {'Name': 'Parallax', 'Value': self.parallax, 'units': 'arcsecs'},\n {'Name': 'Position', 'Value': self.position, 'units': '[hms, dms]'},\n {'Name': 'Galactic Coordinates', 'Value': self.galactic_coords,\n 'units': 'degrees'},\n {'Name': 'Proper Motion (RA)', 'Value': self.pm_ra, 'units': 'mas/year'},\n {'Name': 'Proper Motion (Dec)', 'Value': self.pm_dec, 'units': 'mas/year'},\n {'Name': 'Proper Motion Magnitude', 'Value': self.pm_mag, 'units': 'mas/year'},\n {'Name': 'Proper Motion Position Angle', 'Value': self.pm_posang,\n 'units': 'radians'},\n {'Name': 'Radial Velocity', 'Value': self.v_radial, 'units': 'km/s'},\n {'Name': 'Transverse Velocity', 'Value': self.v_transverse, 'units': 'km/s'},\n {'Name': 'Space Velocity', 'Value': self.v_space, 'units': 'km/s'},\n {'Name': 'Host Constellation', 'Value': self.constellation, 'units': 'N/A'},\n {'Name': 'Distance from Galactic Center', 'Value': self.d_from_GC,\n 'units': 'parsecs'},\n {'Name': 'Closer than Sun to GC?', 'Value': self.closer, 'units': 'N/A'}\n ]\n\n self.full_param_df = pd.DataFrame(d)\n\n if print_help:\n print getdoc(self), '\\n\\n'\n\n if print_df:\n print self.full_param_df", "def combine_par(output_dir): \n #start time\n start_time = time.time()\n \n # set input/output file paths\n infile0 = output_dir + 'TransformParameters.0.txt'\n infile1 = output_dir + 'TransformParameters.1.txt'\n outfile0 = output_dir +'TransformParameters.fwd.txt'\n outfile1 = output_dir +'TransformParameters.inv.txt'\n \n # define reference frame for registration\n ref = 0\n spacing = 1\n \n # Open parameter file 0 and search for GridSpacing and GridOrigin line\n text_filein0 = open( infile0, \"r\" )\n for line in text_filein0:\n if line.find( \"(GridOrigin \" ) == 0:\n origin_str = line\n elif line.find( \"(GridSpacing \" ) == 0:\n spacing_str = line\n text_filein0.close()\n \n # Extract time point origin from line\n origin_split = origin_str.strip().split(' ')\n origin_split = origin_split[ len( origin_split ) - 1 ].split(')')\n old_origin = float( origin_split[ 0 ] )\n \n # Extract time point spacing from line\n spacing_split = spacing_str.strip().split(' ')\n spacing_split = spacing_split[ len( spacing_split ) - 1 ].split(')')\n old_spacing = float( spacing_split[ 0 ] )\n \n \n print(\"Original grid origin in time dimension: \" + str( old_origin ))\n print(\"Original grid spacing in time dimension: \" + str( old_spacing ))\n print(\"\")\n \n # Determine new grid origin\n new_origin = ref - ( 
spacing / old_spacing ) * ( ref - old_origin )\n print( \"New grid origin in time dimension: \" + str( new_origin ))\n \n # Recompose origin and spacing lines\n new_origin_string = origin_str.strip().split(' ')\n new_origin_string.pop()\n new_origin_string = \" \".join( new_origin_string ) + \" \" + str( new_origin ) + \")\\n\"\n new_spacing_string = spacing_str.strip().split(' ')\n new_spacing_string.pop()\n new_spacing_string = \" \".join( new_spacing_string ) + \" \" + str( spacing ) + \")\\n\"\n \n # Reopen text file, replace origin and spacing and write to output file 0\n text_filein0 = open( infile0, \"r\" )\n text_fileout0 = open( outfile0, \"w\" )\n for line in text_filein0:\n if line.find( \"(GridOrigin \" ) == 0:\n # Write new origin line\n text_fileout0.write( new_origin_string )\n elif line.find( \"(GridSpacing \" ) == 0:\n # Write new spacing line\n text_fileout0.write( new_spacing_string )\n elif line.find( \"(InitialTransformParametersFileName \" ) == 0:\n # Remove initial transform\n text_fileout0.write( \"(InitialTransformParametersFileName \\\"NoInitialTransform\\\")\\n\" )\n else:\n # Write line read from input file (no change)\n text_fileout0.write( line )\n text_filein0.close()\n text_fileout0.close()\n \n # Open parameter file 1 and search for GridSize\n text_filein1 = open( infile1, \"r\" )\n for line in text_filein1:\n if line.find(\"(GridSize\") == 0:\n grid_str = line\n grid_split = grid_str.strip().split(' ')\n grid_split[-1] = grid_split[-1].replace(')','')\n grid_split = grid_split[1:]\n grid_float = [float(s) for s in grid_split]\n grid_all = int(grid_float[0] * grid_float[1] * grid_float[2] * grid_float[3])\n num_phase = int(grid_float[3])\n text_filein1.close()\n \n # Replace initial transform parameter filename\n text_filein1 = open( infile1, \"r\" )\n text_fileout1 = open( outfile1, \"w\" )\n for line in text_filein1:\n if line.find( \"(InitialTransformParametersFileName \" ) == 0:\n # Set initial transform filename\n text_fileout1.write( \"(InitialTransformParametersFileName \\\"\" + outfile0 + \"\\\")\\n\" )\n elif line.find(\"(TransformParameters \") == 0:\n # extract b-spline parameters, arrangment : x (Px*Py*Pz), y(Px*Py*Pz), z(Px*Py*Pz), t(Px*Py*Pz)\n transPar_str = line\n transPar_split = transPar_str.strip().split(' ')\n transPar_split[-1] = transPar_split[-1].replace(')','')\n transPar_split = transPar_split[1:]\n num_grid3d = int(grid_all / num_phase) \n str_seg = transPar_split[(ref*num_grid3d):((ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all+(ref*num_grid3d)): (grid_all + (ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all*2+(ref*num_grid3d)): (grid_all*2 + (ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all*3+(ref*num_grid3d)): (grid_all*3 + (ref+1)*num_grid3d)] * num_phase\n #str_seg = \"\"\n #str_seg = [str_seg + transPar_split[((ref*num_grid3d)+grid*i):((ref+1)*num_grid3d+grid*i)] * num_phase for i in range(4)]\n str_joined = ' '.join(str_seg)\n text_fileout1.write(\"(TransformParameters \" + str_joined + \")\\n\")\n else:\n # Write line read from input file (no change)\n text_fileout1.write( line )\n text_filein1.close()\n text_fileout1.close()\n \n # caclulate elapsed time\n end_time = time.time()\n elapsed_time = end_time - start_time\n print('combine_par done. 
elapsed time:', elapsed_time, 's')", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def set_parameters(self, par):\n try:\n for l in self.cell.layers:\n r_curve = cmf.VanGenuchtenMualem(\n Ksat=10**par.pKsat, phi=par.porosity, alpha=par.alpha, n=par.n\n )\n r_curve.w0 = r_curve.fit_w0()\n l.soil = r_curve\n self.cell.saturated_depth = 0.5\n self.gw.potential = self.cell.z - 0.5\n except RuntimeError as e:\n sys.stderr.write(\"Set parameters failed with:\\n\" + str(par) + \"\\n\" + str(e))\n raise", "def _set_psf_layout_psfex(self):\n\n print('setting psf layout for PSFEx')\n\n obj_data=self.obj_data\n psf_data=self.psf_data\n\n total_psf_pixels = 0\n\n #psf_npix = psf_size*psf_size\n\n psf_start_row = 0\n for iobj in range(obj_data.size):\n for icut in range(obj_data['ncutout'][iobj]):\n\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n file_id = obj_data['file_id'][iobj,icut]\n\n p = psf_data[file_id]\n\n pim = p.get_rec(row,col)\n cen = p.get_center(row,col)\n\n psf_shape = pim.shape\n psf_npix = pim.size\n\n obj_data['psf_row_size'][iobj,icut] = psf_shape[0]\n obj_data['psf_col_size'][iobj,icut] = psf_shape[1]\n obj_data['psf_cutout_row'][iobj,icut] = cen[0]\n obj_data['psf_cutout_col'][iobj,icut] = cen[1]\n obj_data['psf_start_row'][iobj,icut] = psf_start_row\n\n psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n\n self.total_psf_pixels = total_psf_pixels", "def __setup_parameters__(self):\r\n self.M=self.N+1\r\n self.u=1+self.pu\r\n self.d=1-self.pd\r\n self.qu=(math.exp((self.r-self.div)*self.dt)-self.d)/(self.u-self.d)\r\n self.qd=1-self.qu", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def set_TranslationsInTiltSeries(self, TiltSeries_):\n for (kk, Proj) in enumerate(TiltSeries_._ProjectionList):\n Proj._alignmentTransX = self._alignmentTransX[kk]\n Proj._alignmentTransY = self._alignmentTransY[kk]", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def set_Y_homog_rot_mtx(angle_rads: float, mtx: numpy.ndarray):\n cosang = numpy.cos(angle_rads)\n sinang = numpy.sin(angle_rads)\n\n mtx[0][0] = mtx[2][2] = cosang\n mtx[0][2] = sinang\n mtx[2][0] = -sinang", "def configure_galaxy(self, galaxy_index, orbital_radius, orbital_particles, theta=0., phi=0., m=None):\n orbital_particles = np.int_(orbital_particles)\n\n if not (galaxy_index == 1 or galaxy_index == 2):\n raise ValueError('Expect either 1 or 2 in galaxy_index')\n else:\n is_g_1 = galaxy_index == 1 # True for galaxy 1 False for galaxy 2\n\n if not isinstance(m, float):\n m = self.M1_feel if is_g_1 else self.M2_feel\n\n if is_g_1:\n if not hasattr(self, '_vx1') or not hasattr(self, '_vy1') or not hasattr(self, '_x1') or not hasattr(self, '_y1'):\n raise UserWarning('Initial phase of body 1 doesn\\'t exist. Please call solve_two_body_problem() first. ' +\n '(Defaulting the initial phase of body 1 to a zero speed at the origin')\n initial_velocity_body = np.zeros((3))\n initial_position_body = np.zeros((3))\n else:\n initial_velocity_body = np.array([self._vx1[0], self._vy1[0], 0])\n initial_position_body = np.array([self._x1[0], self._y1[0], 0])\n else:\n if not hasattr(self, '_vx2') or not hasattr(self, '_vy2') or not hasattr(self, '_x2') or not hasattr(self, '_y2'):\n raise UserWarning('Initial phase of body 2 doesn\\'t exist. Please call solve_two_body_problem() first. 
' +\n '(Defaulting the initial phase of body 2 to a zero speed at the origin')\n initial_velocity_body = np.zeros((3))\n initial_position_body = np.zeros((3))\n else:\n initial_velocity_body = np.array([self._vx2[0], self._vy2[0], 0])\n initial_position_body = np.array([self._x2[0], self._y2[0], 0])\n\n # create rotation matrix\n R1 = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n R2 = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n R = np.matmul(R2, R1)\n\n galaxy_initial_condition = []\n\n for (radius, particles) in np.nditer([orbital_radius, orbital_particles]):\n angular_space = np.linspace(0, 2 * np.pi, particles, endpoint=False)\n\n # first initialise the componenets in x-y plane in the core's frame\n initial_condition = np.zeros((particles, 6))\n\n initial_condition[:, 0] = radius * np.cos(angular_space)\n initial_condition[:, 1] = radius * np.sin(angular_space)\n # initial_condition[:, 2] = np.zeros\n\n velocity = np.sqrt(self._G_feel * m / radius)\n initial_condition[:, 3] = -velocity * np.sin(angular_space)\n initial_condition[:, 4] = velocity * np.cos(angular_space)\n\n # rotate the vectors\n initial_condition[:, :3] = np.matmul(R, initial_condition[:, :3].T).T\n initial_condition[:, 3:] = np.matmul(R, initial_condition[:, 3:].T).T\n\n # galilean transform the initial velocity to the problem frame, by adding the velocity of core\n initial_condition[:, 3:] += initial_velocity_body # broadcasting\n\n # displace the origin to the location of the core, by adding the position vector of core\n initial_condition[:, :3] += initial_position_body # broadcasting\n\n galaxy_initial_condition.append(initial_condition)\n\n # TwoGalaxyProblem._galaxy#_initial_condition is a list of (n, 6) arrays.\n # Each (n, 6) array corresponds to each orbit,\n # where n is the number of particle in each orbit.\n # The 0, 1, 2 dimension of the last axis is the x, y, z component of each particle.\n # The 3, 4, 5 dimension of the last axis is the conjugate momentum of the x, y, z component of each particle.\n if is_g_1:\n self._galaxy1_initial_condition = galaxy_initial_condition\n else:\n self._galaxy2_initial_condition = galaxy_initial_condition\n\n orbital_orientation = {\n 'theta': theta,\n 'phi': phi\n }\n\n setattr(self, '_galaxy{0:d}_orbital_orientation'.format(galaxy_index), orbital_orientation)\n\n orbitals_properties = list(\n map(lambda radius, particle_number: {\n 'radius': radius,\n 'particle_number': particle_number\n }, orbital_radius, orbital_particles)\n )\n\n setattr(self, '_galaxy{0:d}_orbitals_properties'.format(galaxy_index), orbitals_properties)", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = 
(cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def defineThetaParams(self,param_list,value_list):\n for param,value in zip(param_list,value_list):\n _p=param.split()\n if len(_p)==2:\n self.setMorphParam(_p,value,self.theta_params)\n elif len(_p)==3:\n self.setChannelParam(_p,value,self.theta_params)\n else:\n raise RuntimeError", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def setTranslationsInTiltSeries(self, TiltSeries_):\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n proj.setAlignmentTransX(self._alignmentTransX[kk])\n proj.setAlignmentTransY(self._alignmentTransY[kk])", "def setTheta(self, names, dists=[stats.norm], pars=[(0, 1)]):\n self.q1theta.dtype.names = names\n self.post_theta.dtype.names = names\n if os.path.exists('q1theta'):\n self.q1theta = CP.load(open('q1theta','r'))\n else:\n for n,d,p in zip(names,dists,pars):\n self.q1theta[n] = lhs.lhs(d,p,self.K).ravel()", "def setThetaParams(self,param_list,value_list):\n for param,value in zip(param_list,value_list):\n _p=param.split()\n if (not param in self.theta_params):\n raise RuntimeError(param)\n if len(_p)==2:\n self.setMorphParam(_p,value,None)\n elif len(_p)==3:\n self.setChannelParam(_p,value,None)\n else:\n raise RuntimeError", "def setOptimizableVariables(self, TiltAlignmentParameters_, optimizableVariables):\n ntilt = self._ntilt\n nmark = len(self._Markers)\n\n nopti = (nmark - 1) * 3\n\n if self.optimizeMarkerPositions:\n # translation\n nopti += (ntilt) * 2\n\n # variable magnifications for projections, exclude scaling of reference image (S==1)\n if TiltAlignmentParameters_.dmag:\n nopti += ntilt - 1\n\n #variable rotation for projections\n if TiltAlignmentParameters_.drot:\n nopti += ntilt\n else:\n nopti += 1\n\n # beam tilt\n if TiltAlignmentParameters_.dbeam:\n nopti += 1\n\n # nopti += ntilt\n ## gradient on image rotation and magnification in projections\n #if TiltAlignmentParameters_.dGradRotMag:\n # nopti = nopti + 2\n\n # check that number of variables is ok\n if len(optimizableVariables) != nopti:\n print(\"Length optimizableVariables: \" + str(len(optimizableVariables)))\n print(\"N optmization: \" + 
str(nopti))\n raise IndexError('length of optimizableVariables does not match TiltAlignmentParameters')\n\n # marker 3D coords\n ivar = 0\n\n\n for (imark, Marker) in enumerate(self._Markers):\n # reference marker irefmark is fixed to standard value\n if ((imark ) != TiltAlignmentParameters_.irefmark):\n r = numpy.array([optimizableVariables[ivar],\n optimizableVariables[ivar + 1], optimizableVariables[ivar + 2]])\n self._Markers[imark].set_r(r)\n\n ivar = ivar + 3\n\n\n if self.optimizeMarkerPositions:\n # translations\n for itilt in range(0, ntilt):\n # translation in reference projection is zero\n #FFif (self._projIndices[itilt] != TiltAlignmentParameters_.ireftilt):\n self._alignmentTransX[itilt] = optimizableVariables[ivar]\n self._alignmentTransY[itilt] = optimizableVariables[ivar + 1]\n ivar = ivar + 2\n\n\n\n # magnification changes\n if TiltAlignmentParameters_.dmag:\n for itilt in range(0, ntilt):\n # magnification of reference projection is 1.\n if (int(self._projIndices[itilt]) != int(self._projIndices[self.ireftilt])):\n self._alignmentMagnifications[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # image rotations\n if TiltAlignmentParameters_.drot:\n for itilt in range(0, ntilt):\n self._alignmentRotations[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n # all rotations are the same - take the first one\n else:\n self._alignmentRotations[0] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n\n\n # beam inclination\n if TiltAlignmentParameters_.dbeam:\n self._alignmentBeamTilt = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # focus gradient (TODO)\n #if TiltAlignmentParameters_.dGradRotMag:\n # optimizableVariables[ivar] = self._alignmentMagnFoc\n # optimizableVariables[ivar+1] = self._alignmentRotFoc\n\n\n if not self.optimizeMarkerPositions:\n from pytom.scripts.Rotation_function import calculate_translation\n\n\n # r_model is the modelled x,y,z coordinate of the reference marker\n r_model = self._Markers[self.irefmark].get_r()\n\n # if using a reduced set using an indices existing in the reduced set\n # i = int(numpy.argwhere(self.TiltSeries_._projIndices.astype(int) == self.TiltSeries_._TiltAlignmentParas.ireftilt)[0][0])\n psi_ref = numpy.deg2rad(numpy.mean(self._alignmentRotations) + 90)\n\n for iproj in range(0,ntilt):\n # setting variables\n marker = self._Markers[self.irefmark]\n r_exp_tilt = numpy.array([marker.get_xProj(iproj), marker.get_yProj(iproj)]) - numpy.array(\n self.TiltSeries_._TiltAlignmentParas.cent)\n psi_itilt = numpy.deg2rad(self._alignmentRotations[iproj] + 90)\n theta_itilt = numpy.deg2rad(self._tiltAngles[iproj])\n magnification =self._alignmentMagnifications[iproj]\n\n # calculating translation setting difference model and experimental reference marker point at 0\n tx, ty = calculate_translation(r_model, r_exp_tilt, psi_ref, psi_itilt, theta_itilt, magnification)\n\n\n self._alignmentTransX[iproj] = tx\n self._alignmentTransY[iproj] = ty\n\n\n\n # print(self.irefmark, self._alignmentTransX[self.ireftilt], self._alignmentTransY[self.ireftilt])\n # for itilt in range(ntilt):\n # self.q[itilt] = optimizableVariables[ivar]\n # ivar += 1", "def _write_header(self, header):\n # write out telescope and source information\n header[\"latitude\"] = self.telescope_location_lat_lon_alt_degrees[0]\n header[\"longitude\"] = self.telescope_location_lat_lon_alt_degrees[1]\n header[\"altitude\"] = self.telescope_location_lat_lon_alt_degrees[2]\n header[\"telescope_name\"] = np.string_(self.telescope_name)\n header[\"instrument\"] = 
np.string_(self.instrument)\n header[\"object_name\"] = np.string_(self.object_name)\n\n # write out required UVParameters\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"Nfreqs\"] = self.Nfreqs\n header[\"Npols\"] = self.Npols\n header[\"Nspws\"] = self.Nspws\n header[\"Ntimes\"] = self.Ntimes\n header[\"antenna_numbers\"] = self.antenna_numbers\n header[\"uvw_array\"] = self.uvw_array\n header[\"vis_units\"] = np.string_(self.vis_units)\n header[\"channel_width\"] = self.channel_width\n header[\"time_array\"] = self.time_array\n header[\"freq_array\"] = self.freq_array\n header[\"integration_time\"] = self.integration_time\n header[\"lst_array\"] = self.lst_array\n header[\"polarization_array\"] = self.polarization_array\n header[\"spw_array\"] = self.spw_array\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"antenna_positions\"] = self.antenna_positions\n\n # handle antenna_names; works for lists or arrays\n header[\"antenna_names\"] = np.asarray(self.antenna_names, dtype=\"bytes\")\n\n # write out phasing information\n header[\"phase_type\"] = np.string_(self.phase_type)\n if self.phase_center_ra is not None:\n header[\"phase_center_ra\"] = self.phase_center_ra\n if self.phase_center_dec is not None:\n header[\"phase_center_dec\"] = self.phase_center_dec\n if self.phase_center_epoch is not None:\n header[\"phase_center_epoch\"] = self.phase_center_epoch\n if self.phase_center_frame is not None:\n header[\"phase_center_frame\"] = np.string_(self.phase_center_frame)\n\n # write out optional parameters\n if self.dut1 is not None:\n header[\"dut1\"] = self.dut1\n if self.earth_omega is not None:\n header[\"earth_omega\"] = self.earth_omega\n if self.gst0 is not None:\n header[\"gst0\"] = self.gst0\n if self.rdate is not None:\n header[\"rdate\"] = np.string_(self.rdate)\n if self.timesys is not None:\n header[\"timesys\"] = np.string_(self.timesys)\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n if self.blt_order is not None:\n header[\"blt_order\"] = np.string_(\", \".join(self.blt_order))\n if self.antenna_diameters is not None:\n header[\"antenna_diameters\"] = self.antenna_diameters\n if self.uvplane_reference_time is not None:\n header[\"uvplane_reference_time\"] = self.uvplane_reference_time\n if self.eq_coeffs is not None:\n header[\"eq_coeffs\"] = self.eq_coeffs\n if self.eq_coeffs_convention is not None:\n header[\"eq_coeffs_convention\"] = np.string_(self.eq_coeffs_convention)\n\n # write out extra keywords if it exists and has elements\n if self.extra_keywords:\n extra_keywords = header.create_group(\"extra_keywords\")\n for k in self.extra_keywords.keys():\n if isinstance(self.extra_keywords[k], str):\n extra_keywords[k] = np.string_(self.extra_keywords[k])\n else:\n extra_keywords[k] = self.extra_keywords[k]\n\n # write out history\n header[\"history\"] = np.string_(self.history)\n\n return", "def set_phi(self):\n self.phi = float(dihedral(self.O5.getXYZ(), self.C1.getXYZ(), self.GO.getXYZ(), self.CX.getXYZ()))", "def init_solid_params(eos_d):\n # All units must be per atom (to make sense for arbitrary composition)\n\n models.Control.set_consts( [], [], eos_d )\n\n const_d = eos_d['const_d']\n\n Nat_cell = 20\n Nat_formula = 5\n\n T0 = 300 # K\n\n # EOS Parameter values initially set by Mosenfelder2009\n # Set model parameter values\n mass_avg = 
(24.31+28.09+3*16.0)/5.0 # g/(mol atom)\n S0 = 0.0 # must adjust\n param_key_a = ['T0','S0','mass_avg']\n param_val_a = np.array([T0,S0,mass_avg])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # V0 = (38.575*1e-5)*mass_avg/eos_d['const_d']['Nmol']/1e3*1e30 # ang^3/atom\n V0 = 162.35/Nat_cell # ang^3/atom\n K0 = 254.7 # GPa\n KP0= 4.26\n E0 = 0.0\n param_key_a = ['V0','K0','KP0','E0']\n param_val_a = np.array([V0,K0,KP0,E0])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n VR = V0\n thetaR = 736 # K\n gammaR = 2.23\n qR = 1.83\n param_key_a = ['VR','thetaR','gammaR','qR']\n param_val_a = np.array([VR,thetaR,gammaR,qR])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # NOTE: Mosenfelder(2009) has mislabeled units as J/K/g\n # -> units are actually J/K/kg ???\n # The measured 1000K heat capacity of MgSiO3 is ~125 J/K/mol\n # (equal to Dulong Petit value for 5 atom basis)\n # -> This value is thus ~65% of that nominal value,\n # balancing the 30 to 40% values of gamma that are higher than other\n # studies (static compression only constrains Gamma*Cv\n #\n # Max const-vol heat capacity:\n Cvmax = (806.0/1e3)*mass_avg/const_d['kJ_molpereV']/1e3 # J/mol atoms/K -> eV/K/atom\n\n param_key_a = ['Cvmax']\n param_val_a = np.array([Cvmax])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # # Must convert energy units from kJ/g to eV/atom\n energy_conv_fac = mass_avg/eos_d['const_d']['kJ_molpereV']\n models.Control.set_consts( ['energy_conv_fac'], [energy_conv_fac], eos_d )\n\n\n compress_path_mod = models.BirchMurn3(path_const='S',level_const=T0,\n supress_energy=False,\n supress_press=False,\n expand_adj=False)\n models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod],\n eos_d )\n\n gamma_mod = models.GammaPowLaw(V0ref=False)\n models.Control.set_modtypes( ['GammaMod'], [gamma_mod], eos_d )\n\n thermal_mod = models.MieGrunDebye()\n models.Control.set_modtypes( ['ThermalMod'], [thermal_mod], eos_d )\n\n full_mod = models.ThermalPressMod()\n models.Control.set_modtypes( ['FullMod'], [full_mod], eos_d )\n\n\n return eos_d", "def write_initparams(params, outdir, padding_var=7, paramsfn='parameters', skiplat=False, skipglat=False):\n paramfile = outdir + paramsfn + '.txt'\n with open(paramfile, 'w') as myfile:\n myfile.write('# Parameters\\n')\n\n dio.ensure_dir(outdir)\n for key in params:\n if key == 'reg1' or key == 'reg2' or key == 'reg3':\n np.savetxt(outdir + key + '.txt', params[key], fmt='%d', delimiter=',', header=key + ' particle IDs')\n if key == 'xyv0':\n np.savetxt(outdir + 'xyv0.txt', params['xyv0'], delimiter=',',\n header='xy0 (initial positions) v0 (initial velocities)')\n elif key == 'xy':\n if not skiplat:\n np.savetxt(outdir + 'xy.txt', params['xy'], delimiter=',',\n header='xy0 (undeformed lattice positions from mesh)')\n elif key == 'KL':\n if not skiplat:\n np.savetxt(outdir + 'KL.txt', params['KL'], fmt='%i', delimiter=',',\n header='KL (Bond Connectivity List)')\n elif key == 'NL':\n if not skiplat:\n np.savetxt(outdir + 'NL.txt', params['NL'], fmt='%i', delimiter=',', header='NL (Neighbor List)')\n elif key == 'BND':\n np.savetxt(outdir + 'BND.txt', params['BND'], fmt='%i', header='BND (Boundary List)')\n elif key == 'OmK':\n if not skipglat:\n np.savetxt(outdir + 'OmK.txt', params['OmK'], fmt='%f', delimiter=',',\n header='OmK (spring frequency array, for Nash limit: (-1)^(c+b)kl^2/Iw')\n elif key == 'OmG':\n if not skipglat:\n np.savetxt(outdir + 'Omg.txt', params['OmG'], fmt='%f', 
delimiter=',',\n header='Omg (gravitational frequency array, for Nash limit: (-1)^(c+1)mgl/Iw')\n elif key == 'LVUC':\n if not skiplat:\n np.savetxt(outdir + 'LVUC.txt', params['LVUC'], fmt='%i', delimiter=',',\n header='Lattice Vector and Unit cell vector coordinates')\n else:\n with open(paramfile, 'a') as myfile:\n # print 'Writing param ', str(key)\n # print ' with value ', str(params[key])\n # print ' This param is of type ', type(params[key])\n\n if isinstance(params[key], str):\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + params[key] + '\\n')\n elif isinstance(params[key], np.ndarray):\n # print params[key].dtype\n if key == 'BIND':\n print 'BIND = ', str(params[key]).replace('\\n', '')\n\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + \", \".join(np.array_str(params[key]).split()).replace('[,', '[') + '\\n')\n # if params[key].dtype == 'float64':\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ np.array_str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n # elif params[key].dtype == 'int32':\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n # else:\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n elif isinstance(params[key], list):\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + str(params[key]) + '\\n')\n else:\n # print key, ' = ', params[key]\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + '{0:.12e}'.format(params[key]) + '\\n')\n\n # elif key == 'LV':\n # np.savetxt(outdir+'LV.txt',params['LV'], fmt='%18e',delimiter=',', header='Lattice Vector coordinates')\n # elif key == 'UC':\n # np.savetxt(outdir+'UC.txt',params['UC'], fmt='%18e',delimiter=',', header='Unit cell vector coordinates')\n #\n # elif key == 'h':\n # with open(outdir+'h.txt', \"w\") as hfile:\n # hfile.write(\"# h (time step) \\n{0:5e}\".format(h) )\n # elif key == 'beta':\n # with open(outdir+'beta.txt', \"w\") as betafile:\n # betafile.write(\"# beta (damping coeff) \\n{0:5e}\".format(beta) )", "def write_header(self, *, version=3.01, file_type='O: Observation', satellite_type='M: Mixed GNSS',\n run_by='GPSLiDAR', organization='CCAR', observer='Adam Dodge', agency='CCAR', receiver_num='1',\n receiver_type='GENERIC_P1', receiver_vers='1.0.0', antenna_number=1, antenna_type='RTK2-F9P',\n delta_pos=[0,0,0]):\n markerstr = 'GPS LiDAR System at ' + self.longname\n if not os.path.isfile(self.fname):\n tstr = self.t.strftime('%Y%m%d %H%M%S')\n # TODO: Fix header (not working in readers)\n r = 6371000 + self.alt\n x = r * np.cos(self.lat * np.pi/180) * np.cos(self.lon * np.pi/180)\n y = r * np.cos(self.lat * np.pi/180) * np.sin(self.lon * np.pi/180)\n z = r * np.sin(self.lat * np.pi/180)\n header = f'{version:>9.2f}{\" \":<11s}{file_type:<20s}{satellite_type:<20s}{\"RINEX VERSION / TYPE\":<20s}\\n' + \\\n f'{run_by:<20s}{organization:<20s}{tstr:<16s}UTC {\"PGM / RUN BY / DATE\":<20s}\\n' + \\\n f'{markerstr:<60}{\"MARKER NAME\":<20s}\\n' + \\\n f'{self.station:<60}{\"MARKER NUMBER\":<20s}\\n' + \\\n f'{\"GEODETIC\":<20s}{\" \":40s}{\"MARKER TYPE\":<20s}\\n' + \\\n f'{observer:<20}{agency:<40}{\"OBSERVER / AGENCY\":<20s}\\n' + \\\n f'{receiver_num:<20}{receiver_type:<20}{receiver_vers:<20}{\"REC # / TYPE / VERS\":<20s}\\n' + \\\n f'{antenna_number:<20}{antenna_type:<40s}{\"ANT # / TYPE\":<20s}\\n' + \\\n 
f'{x:14.4f}{y:>14.4f}{z:>14.4f}{\" \":18s}{\"APPROX POSITION XYZ\":<20s}\\n' + \\\n f'{delta_pos[0]:14.4f}{delta_pos[1]:>14.4f}{delta_pos[2]:>14.4f}{\" \":18s}{\"ANTENNA: DELTA H/E/N\":<20s}\\n' + \\\n f'G {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'R {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'E {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'S {8:<3d} C1 L1 D1 S1 C5 L5 D5 S5 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'{\"DBHZ\":<60s}{\"SIGNAL STRENGTH UNIT\":<20s}\\n' + \\\n f'{self.t.year:>6d}{self.t.month:>6d}{self.t.day:>6d}{self.t.hour:>6d}{self.t.minute:>6d}' + \\\n f'{self.t.second:>13.7f} UTC{\" \":<9s}{\"TIME OF FIRST OBS\":<20s}\\n' + \\\n f' 0{\" \":54s}{\"RCV CLOCK OFFS APPL\":<20s}\\n' + \\\n f'G{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'R{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'E{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'S{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'{self.leapS:>6d}{\" \":>54s}{\"LEAP SECONDS\":<20s}\\n' + \\\n f'{\" \":>60s}{\"END OF HEADER\":<20s}\\n'\n\n try:\n with open(self.fname, 'w') as f:\n f.write(header)\n except FileNotFoundError:\n print('Data directory is bad. Try again.')\n sys.exit(0)", "def _init_projection(self):\n radius = 6370e3\n \n # Spherical latlon used by WRF\n self.latlon_sphere = pyproj.Proj(proj='latlong',\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n # Lambert Conformal Conic used by WRF\n self.lambert_grid = pyproj.Proj(proj='lcc',\n lat_1=self.truelats[0],\n lat_2=self.truelats[1],\n lat_0=self.ref_latlon[0],\n lon_0=self.stand_lon,\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n grid_size_i = (self.domain_size[0] - 2) * self.cell_size[0]\n grid_size_j = (self.domain_size[1] - 2) * self.cell_size[1]\n\n grid_center_i, grid_center_j = pyproj.transform(\n self.latlon_sphere, self.lambert_grid,\n self.ref_latlon[1], self.ref_latlon[0])\n \n self.offset_i = grid_center_i - grid_size_i * .5\n self.offset_j = grid_center_j - grid_size_j * .5", "def define_projection(self, region):\n region = {\n \"start_longitude\": region[0],\n \"end_longitude\": region[1],\n \"start_latitude\": region[2],\n \"end_latitude\": region[3],\n }\n projection = \"LambertConformal\"\n plotextend = [\n region[\"start_longitude\"],\n region[\"end_longitude\"],\n region[\"start_latitude\"],\n region[\"end_latitude\"],\n ]\n if projection == \"LambertConformal\":\n # plotextend has to be a little larger so everything is on there\n plotextend = [\n plotextend[0] - 1.0,\n plotextend[1] + 1.0,\n plotextend[2] - 1.0,\n plotextend[3] + 1.0,\n ]\n # path to cut out is exact though\n lons = self.region_to_square(region, \"longitude\")\n lats = self.region_to_square(region, \"latitude\")\n path_ext = [[lon, lat] for lon, lat in zip(lons, lats)]\n path_ext = mpath.Path(path_ext).interpolated(20)\n # South Hemisfere\n if region[\"start_latitude\"] <= 0 and region[\"end_latitude\"] <= 0:\n proj = ccrs.LambertConformal(\n central_longitude=np.sum(plotextend[:2]) / 2.0,\n central_latitude=np.sum(plotextend[2:]) / 2.0,\n cutoff=+30,\n standard_parallels=(-33, -45),\n )\n # North Hemisphere\n else:\n proj = ccrs.LambertConformal(\n central_longitude=np.sum(plotextend[:2]) / 2.0,\n central_latitude=np.sum(plotextend[2:]) / 2.0,\n )\n return proj, path_ext, plotextend", "def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in 
question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0", "def __init__(self,):\r\n self.g = 9.81\r\n self.l = 0.5\r\n self.m1 = 1.0\r\n self.m2 = 1.0\r\n self.m3 = 1.0\r\n self.r1 = 1.0\r\n self.r2 = 1.0\r\n self.tau = 0.001\r\n self.theta1 = 1.0\r\n self.theta2 = 1.0\r\n self.theta3 = 1.0", "def _setOutputFrame(self, pars): \n _sky_field = None\n\n if pars['refimage'] != '' and pars['refimage'] != None:\n # Use the following if the refimage isn't actually going to be\n # drizzled, we just want to set up the pydrizzle object\n #\n _refimg = wcsutil.WCSObject(pars['refimage'])\n refimg_wcs = _refimg.copy()\n\n # If the user also specified a rotation to be applied,\n # apply that as well...\n if pars['rot']:\n _orient = pars['rot']\n else:\n _orient = refimg_wcs.orientat\n\n # Now, build output WCS using the SkyField class\n # and default product's WCS as the initial starting point.\n #\n _sky_field = pydrizzle.SkyField(wcs=refimg_wcs)\n # Update with user specified scale and rotation\n _sky_field.set(psize=pars['scale'],orient=_orient)\n\n elif pars['rot'] != None or \\\n pars['scale'] != None or \\\n pars['ra'] != None or \\\n pars['outnx'] != None:\n\n _sky_field = pydrizzle.SkyField()\n\n if pars['rot'] == None:\n _orient = self.assoc.observation.product.geometry.wcslin.orient\n else:\n _orient = pars['rot']\n\n # Need to account for non-existent specification of shape\n # when setting up output field parameters.\n if pars['outnx'] == None: _shape = None\n else: _shape = (pars['outnx'],pars['outny'])\n \n print 'Default orientation for output: ',_orient,'degrees'\n\n _sky_field.set(psize=pars['scale'], orient=_orient,\n ra=pars['ra'], dec=pars['dec'], shape=_shape)\n\n # Now that we have built the output frame, let the user know\n # what was built...\n if _sky_field != 
None:\n print ('\\n Image parameters computed from reference image WCS: \\n')\n print _sky_field.wcs\n\n # Apply user-specified output to ASN using the resetPars method.\n # If field==None, it will simply reset to default case.\n #\n self.assoc.resetPars(field=_sky_field,\n pixfrac=pars['pixfrac'],\n kernel=pars['kernel'])", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def setup_orbit(self, t, halo_gas_density, galaxy_velocity):\n \n if any( [halo_gas_density > 1.0E-10] ) : # convert to mass density\n halo_gas_density = halo_gas_density * self.ic['mu_halo'] * cgs.mp\n \n # if t is an array, then use a cubic spline to make a function from the orbital\n # data. If t is a single value, then halo gas dnesity and velocity are constants..\n # make them into functions anyway to make rest of everything work...\n if np.size(halo_gas_density) > 1 : \n self.halo_density = interpolate.UnivariateSpline(t, halo_gas_density,k=3)\n else:\n self.halo_density = lambda x: halo_gas_density\n \n if np.size(galaxy_velocity) > 1:\n self.galaxy_velocity = interpolate.UnivariateSpline(t, galaxy_velocity ,k=3)\n else:\n self.galaxy_velocity = lambda x: galaxy_velocity", "def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n 
zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs", "def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def set_params2D(ima, p, xform = \"xform.align2d\"):\n\tt = Transform({\"type\":\"2D\",\"alpha\":p[0],\"tx\":p[1],\"ty\":p[2],\"mirror\":p[3],\"scale\":p[4]})\n\tima.set_attr(xform, t)", "def set_parameters(pars):\n y0=[]\n fun=None \n state_evol=None\n if pars[\"state_law\"]==0:\n state_evol=state_evol_d\n elif pars[\"state_law\"]==1:\n state_evol=state_evol_r\n elif pars[\"state_law\"]==2:\n state_evol=state_evol_p\n elif pars[\"state_law\"]==3:\n state_evol=state_evol_n\n \n if pars[\"model\"]==0:\n y0 = [pars[\"Vpl\"]*0.9,0.1,pars[\"sigma1\"]]\n fun = fun_qds\n damping = pars[\"nu\"]\n \n if pars[\"model\"]==1:\n y0 = [pars[\"Vpl\"]*0.9, 0.1,pars[\"sigma1\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fds\n damping = pars[\"m\"]\n\n if pars[\"model\"]==2:\n y0 = [pars[\"Vpl\"]*0.99,pars[\"Vpl\"], pars[\"Vpl\"],0.1,pars[\"sigma1\"],pars[\"sigma2\"]]\n fun= fun_qdc\n damping = pars[\"nu\"]\n\n if pars[\"model\"]==3:\n y0 = [pars[\"Vpl\"]*1.1,pars[\"Vpl\"], 
pars[\"Vpl\"],0.0,pars[\"sigma1\"],pars[\"sigma2\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fdc\n damping = pars[\"m\"]\n\n return (np.array(y0), state_evol, fun, damping)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def set_pub_robot_pose(self, x, y, yaw):\r\n self.publisher_robot.set_pose_by_center(x, y, yaw)", "def set_body_frame_position_vectors(pa):\n nb = pa.nb[0]\n # loop over all the bodies\n for i in range(nb):\n fltr = np.where(pa.body_id == i)[0]\n cm_i = pa.cm[3 * i:3 * i + 3]\n R_i = pa.R[9 * i:9 * i + 9]\n for j in fltr:\n dx = pa.x[j] - cm_i[0]\n dy = pa.y[j] - cm_i[1]\n dz = pa.z[j] - cm_i[2]\n\n pa.dx0[j] = (R_i[0] * dx + R_i[3] * dy + R_i[6] * dz)\n pa.dy0[j] = (R_i[1] * dx + R_i[4] * dy + R_i[7] * dz)\n pa.dz0[j] = (R_i[2] * dx + R_i[5] * dy + R_i[8] * dz)", "def set_parameters(self):\n params = {}\n if self.modelname == 'SI':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after splot\n # Ts: Time from split to present, in 2*Na generation units\n names = ['N1', 'N2', 'Ts']\n values = [1, 1, 1]\n upper_bounds = [20, 20, 10]\n lower_bounds = [0.01, 0.01, 0]\n elif self.modelname == 'IM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Ts']\n values = [1, 1, 1, 1, 1]\n upper_bounds = [20, 20, 20, 20, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0]\n elif self.modelname == 'AM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Tam', 'Ts']\n values = [1, 1, 1, 1, 0.1, 1]\n upper_bounds = [20, 20, 20, 20, 2, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'SC':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n names = ['N1', 'N2', 'm21', 'm12', 'Ts', 'Tsc']\n values = [1, 1, 1, 1, 1, 0.1]\n upper_bounds = [20, 20, 20, 20, 10, 2]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'IM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'AM2M':\n # N1: Pop 1 
size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Tam', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 0.1, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 2, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'SC2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'Tsc', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 2, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n params['Names'] = names\n params['Values'] = values\n params['Upper'] = upper_bounds\n params['Lower'] = lower_bounds\n return params", "def write_settings(self, settings_file):\n lines = []\n lines.append(f'parameters.pixelSize = {self.pixel_size};')\n lines.append(f'parameters.wavelength = {self.wavelength};')\n lines.append(f'parameters.distance = {self.distance};')\n lines.append(f'parameters.unitCell = {list(self.lattice_settings)};')\n lines.append(f'parameters.ubMat = {str(self.UBmat.tolist())};')\n lines.append(f'parameters.oMat = {str(self.Omat.tolist())};')\n lines.append('parameters.oVec = [0,0,0];')\n lines.append(f'parameters.det0x = {self.xc};')\n lines.append(f'parameters.det0y = {self.yc};')\n lines.append('parameters.xTrans = [0,0,0];')\n lines.append(\n f'parameters.orientErrorDetPitch = {self.pitch * radians};')\n lines.append(f'parameters.orientErrorDetRoll = {self.roll * radians};')\n lines.append(f'parameters.orientErrorDetYaw = {self.yaw * radians};')\n lines.append(\n f'parameters.orientErrorGonPitch = {self.theta * radians};')\n lines.append('parameters.twoThetaCorrection = 0;')\n lines.append(f'parameters.twoThetaNom = 0;')\n lines.append(f'parameters.twoThetaStep = 0;')\n lines.append('parameters.omegaCorrection = 0;')\n lines.append(f'parameters.omegaNom = {self.omega * radians};')\n lines.append(f'parameters.omegaStep = 0;')\n lines.append('parameters.chiCorrection = 0;')\n lines.append(f'parameters.chiNom = {self.chi * radians};')\n lines.append(f'parameters.chiStep = 0;')\n lines.append('parameters.phiCorrection = 0;')\n lines.append(f'parameters.phiNom = {self.phi * radians};')\n lines.append(f'parameters.phiStep = {self.phi_step * radians};')\n lines.append(f'parameters.gridOrigin = {self.grid_origin};')\n lines.append(f'parameters.gridBasis = {self.grid_basis};')\n lines.append(f'parameters.gridDim = {self.grid_step};')\n lines.append('parameters.gridOffset = [0,0,0];')\n lines.append('parameters.extraFlip = false;')\n lines.append(f'outputData.dimensions = {list(self.grid_shape)};')\n lines.append('outputData.chunkSize = [50,50,50];')\n lines.append('outputData.compression = 0;')\n lines.append('transformer.transformOptions = 
0;')\n lines.append('transformer.oversampleX = 1;')\n lines.append('transformer.oversampleY = 1;')\n lines.append('transformer.oversampleZ = 4;')\n with open(settings_file, 'w') as f:\n f.write('\\n'.join(lines))", "def get_phase_space(self, grid_flag):\n\n f = h5py.File(self.xs_path, 'r')\n self.N = f['paramdescrip']['NVALUE'].value # det maximum range Ni for each d_i\n phase_space = {}\n order = {}\n NPAR = f['paramdescrip']['NPAR'].value[0]\n for di in range(NPAR - 1):\n di_name = f['paramdescrip']['PARNAM'].value[di] # get names for dimensions. Starts at 0\n # get values for dimensions. Starts at 1. e.g. 'BURNUP': array([ 0., 9.35253143, 18.70503998,..\n # Is saved as a np.array, of floats64 FORTRAN-contiguous\n phase_space[di_name] = np.array([float(val) for val in f['paramvaleurs'][\n 'pval %d' % (di + 1)].value], order='F')\n order[di] = di_name # e.g. '7': 'BURNUP'\n\n iso_aux = []\n # just concatenate those two\n for iso in f['contenu']['NOMISO'].value[:]:\n iso_aux.append(iso)\n for iso in f['contenu']['NOMMAC'].value[:]:\n iso_aux.append(iso)\n f.close()\n self.iso_A2 = iso_aux\n\n # USER IMPOSED: Non-independant variables set to [0].\n \"\"\"\n *Do not eliminate them, this will bring problems with the cartesin product later one\n *if instead of '[phase_space['PHASE'][0]]' (which is equal to 1) just '[1]' is written then np.where() does not recognize the value.\n\n This two problems rise from the decision of defining the 'space of interest' as a subset from the 'phase space' which in time is read directly from the H5F file. Later several comparisons are made between the two. The upside is the need for no explicit declaration of the phase-space thus minimizing chances of un-noticed error in domain assignation.\n \"\"\"\n if 'PHASE' in phase_space.keys():\n phase_space['PHASE'] = [phase_space['PHASE'][0]]\n if 'BURNUPstep' in phase_space.keys():\n phase_space['BURNUPstep'] = [phase_space['BURNUPstep'][0]]\n\n if grid_flag == 'SG': # major update required\n \"\"\"\n In contras to FG, the stored values in the concatenated SAPHYB file only considers different burnup steps, i.e a set of values [0, 500, 500, 100] are stored as [0, 500, 100]. Two posibilities remain, read the BURNUP value from the single XS files separatly or load a pickeled object with the phase space. 
The second option was implemented.\n \"\"\"\n with open(self.file_path + self.xs_folder + 'phase_space.pickle', 'rb') as handle:\n phase_space_pk = pickle.load(handle)\n phase_space_pk.pop('a')\n phase_space_pk.pop('d')\n phase_space_pk.pop('l')\n phase_space_pk.pop('BURNUP_evol')\n phase_space_pk.pop('BURNUP_steps')\n phase_space = phase_space_pk\n\n self.phase_space, self.order, self.d, self.NPAR = phase_space, order, len(order), NPAR", "def orbitproject(x,y,inc,phi=0,psi=0):\n\n x2 = x*np.cos(phi) + y*np.sin(phi)\n y2 = -x*np.sin(phi) + y*np.cos(phi)\n z2 = y2*np.sin(inc)\n y2 = y2*np.cos(inc)\n\n xf = x2*np.cos(psi) - y2*np.sin(psi)\n yf = x2*np.sin(psi) + y2*np.cos(psi)\n\n return (xf,yf,z2)", "def stereographic_projection(phi_degree, psi_degree):\n psi_rad = psi_degree *np.pi/180\n psi_stereo = 2*np.tan(psi_rad/2)\n\n phi_rad = phi_degree *np.pi/180\n return phi_rad, psi_stereo", "def load_phi(self, **kwargs):\r\n msf12 = kwargs['msf12']\r\n msf06 = kwargs['msf06']\r\n\r\n self.phi = PHI(msf12, msf06)\r\n self.zeta = 0.01 * np.ones([self.phi.num_modes])", "def __init__(self, camID, camera_cal_file='camera_cal_bnl.yaml'):\n self.camID=camID\n with open(camera_cal_file,\"r\") as yfile:\n params=yaml.load(yfile)\n # exit gracefully if yfile doesn't open\n self.nx0=params[camID]['nx0']\n self.ny0=self.nx0\n # pr0 is nx0/2, i.e. probably initial radius estimate.\n # pr0 rather than nx0 should be in the camera_cal_SSS.yaml config file\n self.pr0=(self.nx0+self.ny0)/4.\n self.ndy0=params[camID]['ndy0']\n self.ndx0=params[camID]['ndx0']\n self.cx=params[camID]['cx']\n self.cy=params[camID]['cy']\n self.rot=params[camID]['rot']\n self.beta=params[camID]['beta']\n self.azm=params[camID]['azm']\n self.c1=params[camID]['c1']\n self.c2=params[camID]['c2']\n self.c3=params[camID]['c3']\n self.lat=params[camID]['lat']\n self.lon=params[camID]['lon']\n# may need to resurrect this\n# xstart=int(params[camID]['cy']-nx0/2+0.5); ystart=int(params[camID]['cx']-ny0/2+0.5)\n self.nx0=int(self.nx0+0.5)\n self.ny0=int(self.ny0+0.5)", "def set_params(self, params):\n params = dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.ndimx = params.ndimx\n self.params.model_str = getattr(params, 'model_str', 'optfixedsig')\n self.params.ig1 = getattr(params, 'ig1', 4.0)\n self.params.ig2 = getattr(params, 'ig2', 3.0)\n self.params.n1 = getattr(params, 'n1', 1.0)\n self.params.n2 = getattr(params, 'n2', 1.0)\n self.params.sigma = getattr(params, 'sigma', 1e-5)\n self.params.niter = getattr(params, 'niter', 70)\n self.params.kernel = getattr(params, 'kernel', kern_matern)\n self.params.trans_x = getattr(params, 'trans_x', False)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n 
self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def reformat_pose_to_dict(self, now_pose):\n # now_pose è un dict in particolare { pose : [ {position : [{x : value , y:value , z:value} ] } , {orientation : [] } }\n # devo convertire i quaternioni in amgoli di eulero...estrarre i quaternioni da pose_now e convertirli in angoli RPY\n\n lato_corto_2 = 1.65 #1.45 # offset parcheggio\n \n #correggo gli offset x centrare le macchine nei parcheggi\n\n if abs(round(now_pose.position.x,2)) == 22.45:\n if now_pose.position.x < 0 :\n now_pose.position.x+=lato_corto_2\n now_pose.position.y-=0.4\n else :\n now_pose.position.x-=lato_corto_2\n now_pose.position.y+=0.4\n \n if abs(round(now_pose.position.y,2)) == 22.45:\n if now_pose.position.y < 0 :\n now_pose.position.y+=lato_corto_2\n now_pose.position.x+=0.4\n else :\n now_pose.position.y-=lato_corto_2\n now_pose.position.x-=0.4\n\n # correggo la z per renderla uguale all'asfalto che viene spownata nel mondo\n\n offset_asfalto = 0.3\n\n x = now_pose.position.x\n y = now_pose.position.y\n z = now_pose.position.z + offset_asfalto\n\n q1 = now_pose.orientation.x\n q2 = now_pose.orientation.y\n q3 = now_pose.orientation.z\n q4 = now_pose.orientation.w\n\n\n # converto i quaternioni in angoli di rulero RPY in radianti\n orientation_list = [q1,q2,q3,q4]\n\n euler = euler_from_quaternion( orientation_list )\n roll = euler[0]\n pitch = euler[1]\n yaw = round(euler[2],2) + np.pi\n\n\n # creo la lista dei parametri che mi servono nel campo pose:[] del file .yaml\n\n lista_parametri = [x ,y ,z ,roll ,pitch ,yaw ]\n\n # creo un dict con tutti i campi di cui ho bisogno nel file .yaml\n # settare le chiavi 'name' , ' type ' , 'package' , ' pose ' secondo le proprie necessità\n # i due stili sono equivalenti : usare quello preferito\n \"\"\"\n {\"name\" : \"park1\" , \n \"type\" : \"sdf\" , \n \"package\" : \"object_spawner\" , \n \"pose \":self.seq(lista_parametri) \n }\n \n \"\"\"\n lista_veicoli = ['macchina','pickup','ferrari','prius_hybrid','car_lexus','car_polo','car_volvo','car_golf']\n num_veicoli = 1\n\n #modificare qui implementando una funzione randomica se si vogliono piu veicoli casuali spawnati\n elemento_lista = {'name' : lista_veicoli[3],\n 'type': 'sdf',\n 'package': 'object_spawner',\n 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n #\"\"\"\n # elemento_lista = {'name' : 'ferrari',\n # 'type': 'urdf',\n # 'package': 'autopark',\n # 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n\n return elemento_lista", "def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F444W', grism='DFSR'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' 
%(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRCam'\n h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'DFSR':\n h['GRISM'] = 'DFSR', 'Spectral trace along X'\n else:\n h['GRISM'] = 'DFSC', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def update_initial_pose(self, msg):\n xy_theta = \\\n self.transform_helper.convert_pose_to_xy_and_theta(msg.pose.pose)\n\n # TODO this should be deleted before posting\n self.transform_helper.fix_map_to_odom_transform(msg.pose.pose,\n msg.header.stamp)\n # initialize your particle filter based on the xy_theta tuple", "def write_parameters(par, version='git-devel'):\n # read template\n file = findpath('sesiflows.seistools') + '/' + 'specfem2d/par-' + version\n with open(file, 'r') as f:\n lines = f.readlines()\n lines[-1] = ' '.join(['1', str(par.NX), '1', str(par.NZ), '1'])\n\n # write parameter file\n file = 'DATA/Par_file'\n _writelines(file, lines)\n setpar('xmin', str(par.XMIN))\n setpar('xmax', str(par.XMAX))\n setpar('nx', str(par.NX))\n setpar('nt', str(par.NT))\n setpar('deltat', str(par.DT))\n setpar('nsources', str(1))\n\n # write interfaces file\n file = 'DATA/interfaces.dat'\n lines = []\n lines.extend('2\\n')\n lines.extend('2\\n')\n lines.extend('%f %f\\n'%(par.XMIN, par.ZMIN))\n lines.extend('%f %f\\n'%(par.XMAX, par.ZMIN))\n lines.extend('2\\n')\n lines.extend('%f %f\\n'%(par.XMIN, par.ZMAX))\n lines.extend('%f %f\\n'%(par.XMAX, par.ZMAX))\n lines.extend(str(par.NZ))\n _writelines(file, lines)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def setParams(self, p = 2):\n self.p = p\n self.l = p - 1\n self.id_ntot = {}\n self.id_y = {}\n self.id_W = {}\n self.id_X = {}\n for i in self.uniids:\n tracker = (self.data['id'] == i)\n self.id_ntot.update({i: np.sum(tracker)})\n self.id_y.update({i:\n self.data['weight'][tracker].reshape(np.sum(tracker), 1)})\n self.id_W.update({i: self._designMatrix_(p, tracker)})\n self.id_X.update({i:\n self._designMatrix_(self.l+1,tracker,is_X=True)})\n self.id_Z = self.id_W.copy()", "def get_probeLocs_calib_setup(dir, num_probes = 16):\n position_vectors = [[0] 
* 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*1e-3*25.4, -4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4]\n y_pos = [-4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4, -4.25*1e-3*25.4]\n z_pos = [-2.25*1e-3*25.4, -0.75*1e-3*25.4, 0.75*1e-3*25.4, 2.25*1e-3*25.4]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def setIndexNames(self):\n self.theta = self.i1\n self.radial = self.i2", "def updateParameters(self, parameters):\r\n if parameters[0].altered:\r\n in_nc_file = parameters[0].valueAsText\r\n\r\n # Establish an object for reading the input NetCDF file\r\n ncFP = arcpy.NetCDFFileProperties(in_nc_file)\r\n\r\n # Loop through global variables in NetCDF file to gather projection information\r\n ncVarNames = ncFP.getVariablesByDimension('west_east')\r\n ncMassgridNames = []\r\n for x in ncVarNames:\r\n mgridvar = ncFP.getAttributeValue(x, 'stagger') # Only use variables on Massgrid for now ('M')\r\n if mgridvar == 'M':\r\n ncMassgridNames.append(x)\r\n parameters[1].filter.list = ncMassgridNames\r\n return", "def move_theta(self, theta0=None, dth=[0.1, 0.1], deltaE=[None, None]):\n if (theta0 is None):\n if (not self.rc.theta0):\n raise NameError('move_theta(): set theta0 first!')\n else:\n theta0 = self.rc.theta0\n if not self.rc.d:\n raise NameError('move_theta(): set crystal d-spacing first!')\n if not self.rc.Rm:\n raise NameError('move_theta(): set Rowland circle radius first')\n\n self.rc.set_theta0(theta0)\n d = self.rc.d\n ene0 = self.rc.get_ene()\n # adjust source energy to new theta0\n self.oe1.PHOT_CENT = ene0\n if (deltaE[0] is None):\n self.src.PH1 = bragg_ev(d, theta0+abs(dth[1]))\n else:\n self.src.PH1 = ene0 - abs(deltaE[0]) \n if (deltaE[1] is None):\n self.src.PH2 = bragg_ev(d, theta0-abs(dth[0]))\n else:\n self.src.PH2 = ene0 + abs(deltaE[1])\n \n # adjust source divergence to new theta0 (assumig flat)\n _hlp = abs(self.oe1.RLEN1) # half-length pos\n _hln = abs(self.oe1.RLEN2) # half-length neg\n _hwp = abs(self.oe1.RWIDX1) # half-width pos\n _hwn = abs(self.oe1.RWIDX2) # half-width neg\n _rth0 = self.rc.rtheta0\n _pcm = self.rc.p # cm\n # _vdiv_pos = math.atan( (_hlp * math.sin(_rth0)) / (_pcm + _hlp * math.cos(_rth0) ) )\n # _vdiv_neg = math.atan( (_hln * math.sin(_rth0)) / (_pcm - _hln * math.cos(_rth0) ) )\n # _hdiv_pos = math.atan( (_hwp / _pcm) )\n # _hdiv_neg = math.atan( (_hwn / _pcm) )\n _vdiv_pos = (_hlp * math.sin(_rth0)) / (_pcm + _hlp * math.cos(_rth0) )\n _vdiv_neg = (_hln * math.sin(_rth0)) / (_pcm - _hln * math.cos(_rth0) )\n _hdiv_pos = (_hwp / _pcm)\n _hdiv_neg = (_hwn / _pcm)\n self.src.VDIV1 = _vdiv_pos * 1.1 # empirical!\n self.src.VDIV2 = _vdiv_neg * 1.1 # empirical!\n self.src.HDIV1 = 
_hdiv_pos * 1.1 # empirical!\n self.src.HDIV2 = _hdiv_neg * 1.1 # empirical!\n\n # adjust analyser to new theta0\n self.oe1.set_frame_of_reference(self.rc.p, self.rc.q, 90.-theta0)\n\n if self.rc.showInfos:\n print(' --- theta movement infos --- ')\n print('OE1: PHOT_CENT = {0}'.format(self.oe1.PHOT_CENT )) \n print('SRC: PH1 = {0}'.format(self.src.PH1 )) \n print('SRC: PH2 = {0}'.format(self.src.PH2 )) \n print('SRC: VDIV1 = {0}'.format(self.src.VDIV1 )) \n print('SRC: VDIV2 = {0}'.format(self.src.VDIV2 )) \n print('SRC: HDIV1 = {0}'.format(self.src.HDIV1 )) \n print('SRC: HDIV2 = {0}'.format(self.src.HDIV2 )) \n print('OE1: T_SOURCE = {0}'.format(self.oe1.T_SOURCE )) \n print('OE1: T_IMAGE = {0}'.format(self.oe1.T_IMAGE )) \n print('OE1: T_INCIDENCE = {0}'.format(self.oe1.T_INCIDENCE )) \n print('OE1: T_REFLECTION = {0}'.format(self.oe1.T_REFLECTION))", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')", "def setheaders(f):\n f.headers['OBSERVER'] = \"'%s'\" % camera.status.observer\n f.headers['FILTERID'] = \"'%s'\" % filtname(camera.status.filter)\n f.headers['FILTER'] = \"%1d\" % camera.status.filter\n f.headers['XYSTAGE'] = \"'%d,%d'\" % camera.status.guider\n f.headers['MIRROR'] = \"'%s'\" % camera.status.mirror\n if camera.status.imgtype == 'BIAS':\n f.headers['BIAS'] = camera.status.object\n elif camera.status.imgtype == 'DARK':\n f.headers['DARK'] = camera.status.object\n else:\n f.headers['OBJECT'] = camera.status.object\n try:\n skytemp = weather.status.skytemp\n f.headers['SKYTEMP'] = \"%4.1f\" % skytemp\n f.comments['SKYTEMP'] = \"'Infrared sky temp in degC'\"\n except:\n pass\n\n try:\n if not camera.status.TJ.current.posviolate: #Position calibrated to epoch\n ra = camera.status.TJ.current.Ra/15/3600\n dec = camera.status.TJ.current.Dec/3600\n epoch = camera.status.TJ.current.Epoch\n alt = camera.status.TJ.current.Alt\n GotTJ = True\n elif camera.status.TJ.current.RaC:\n ra = camera.status.TJ.current.RaC\n dec = camera.status.TJ.current.DecC\n alt = camera.status.TJ.current.Alt\n t = time.gmtime()\n epoch = t.tm_year + (t.tm_yday/366.0)\n GotTJ = True\n else:\n GotTJ = False\n except AttributeError:\n GotTJ = False \n if GotTJ:\n f.headers['RA_OBJ'] = \"%12.9f\" % (ra*15.0)\n f.headers['RA'] = \"'%s'\" % sexstring(ra)\n f.headers['DEC_OBJ'] = \"%13.9f\" % dec\n f.headers['DEC'] = \"'%s'\" % sexstring(dec)\n f.headers['EQUINOX'] = \"%6.1f\" % epoch\n f.headers['SECZ'] = \"%6.3f\" % (1/math.cos((90-alt)*math.pi/180))\n if GotFT:\n hjd,message = fitstime.findtime(fimage=f, verbose=0, allfields=0)\n if type(hjd) == float:\n f.headers['HJD'] = \"%f\" % hjd\n f.comments['HJD'] = \"Heliocentric Julian Day at exposure midpoint\"", "def __init__(self, theta=0, r=1,h=10,phi=0.2,Qa=(10,10),Qb=(10,10)):\n self.theta=theta\n self.r=r\n self.phi=phi\n self.h=h\n \n self.Qa0=Qa[0]\n self.Qa1=Qa[1]\n self.Qb0=Qb[0]\n self.Qb1=Qb[1]\n \n self.c1=self.Qa1-self.Qa0\n self.c2=self.Qb1-self.Qb0\n \n self.NewOrigin=(25,25)\n self.RotateAngle=-np.pi*3/4\n \n self.Te=0", "def asn_parameters(model='spk',**kwargs):\n\n pars = {'rhoe' : 6.5e-4,\n 'Ou' : 0.,\n 'Ku' : 100.,\n 
'taue' : 1./60,\n 'Gtot' : 200., # MUST BE in [mM]\n 'Og' : 1.5,\n 'taug' : 30.,\n 'alpha': 0.5\n }\n pars = gu.merge_dicts(pars, gtrelease_parameters(),exocytosis_parameters())\n pars['ICs'] = np.asarray([0.,0.,0.05,0.99]) # [G_A,\\Gamma_S,c,h]\n pars['ICr'] = np.asarray([1,0.,0.,1.]) # [x_S,y_S,u_S,x_A]\n ## User-defined parameters\n pars = gu.varargin(pars, **kwargs)\n ## Takes only the first two elements of ICs in the MF model\n if model=='ave':\n pars['ICs'] = pars['ICs'][:2]\n if 'js' in kwargs:\n pars['js'] = kwargs['js']\n else:\n pars['js'] = pars['rhoe']*pars['Og']*1e3*pars['Gtot']*pars['taue']\n for k,item in pars.iteritems():\n if isscalar(item):\n pars[k] = float(item)\n else:\n pars[k] = array(item,dtype=float)\n # pars['Gtot'] *= 1e3 # Convert to [uM]\n return pars", "def MakeParams(params):\n if params['gs_model'] == system_types.kGroundStationModelTopHat:\n gps_primary_antenna_dir = [0.0, 0.0, -1.0]\n gps_primary_pos = [1.418, -1.657, -2.417]\n\n # TopHat doesn't actually have a secondary gps.\n gps_secondary_antenna_dir = gps_primary_antenna_dir\n gps_secondary_pos = gps_primary_pos\n\n # Angle [rad] from the GPS compass baseline to the zero-azimuth\n # reference of the perch frame. Note: The TopHat does not have a\n # GPS compass, but this value is set for historical consistency.\n gps_compass_to_perch_azi = -2.440\n\n elif params['gs_model'] == system_types.kGroundStationModelGSv1:\n gps_primary_antenna_dir = [0.0, 0.0, -1.0]\n # Position measured on 2015-06-15.\n gps_primary_pos = [0.0, 0.0, -2.94]\n # GSv1 doesn't actually have a secondary gps.\n gps_secondary_antenna_dir = gps_primary_antenna_dir\n gps_secondary_pos = gps_primary_pos\n\n # Angle [rad] from the GPS compass baseline to the zero-azimuth\n # reference of the perch frame\n gps_compass_to_perch_azi = -2.440\n\n elif params['gs_model'] == system_types.kGroundStationModelGSv2:\n gps_primary_antenna_dir = [0.0, 0.0, -1.0]\n gps_secondary_antenna_dir = [0.0, 0.0, -1.0]\n if params['test_site'] == system_types.kTestSiteParkerRanch:\n # See b/137283974 for details.\n gps_primary_pos = [-0.002, 0.011, -6.7]\n gps_secondary_pos = [-2.450, -0.428, -6.827]\n elif params['test_site'] == system_types.kTestSiteNorway:\n # See b/137660975 for details.\n gps_primary_pos = [-0.002, 0.011, -6.7]\n gps_secondary_pos = [-2.450, -0.428, -6.757]\n else:\n assert False, 'Unsupported test site.'\n # Angle [rad] from the GPS compass baseline to the zero-azimuth\n # reference of the platform frame. 
See b/118710931.\n gps_compass_to_perch_azi = np.deg2rad(169.84)\n\n else:\n assert False, 'Unsupported ground station model.'\n\n return {\n # Position [m] of the GS GPS antenna in the platform frame.\n # NOTE: The direction of the antennae is currently not used.\n 'primary_antenna_p': {\n 'antenna_dir': gps_primary_antenna_dir,\n 'pos': gps_primary_pos,\n },\n 'secondary_antenna_p': {\n 'antenna_dir': gps_secondary_antenna_dir,\n 'pos': gps_secondary_pos,\n },\n\n # Calibration for the ground station compass ([#], [rad], [#]).\n # The bias is used to account for the angle between the perch\n # frame and the NovAtel differential GPS receiver.\n # TODO: Remove this parameter once the computation of\n # compass heading from the primary and secondary antennae is implemented.\n 'heading_cal': {\n 'scale': 1.0, 'bias': gps_compass_to_perch_azi, 'bias_count': 0}\n }", "def _sims_header(self, hdr):\n # Called DefAnalysisBis and DefEps in OpenMIMS\n d = {}\n d['simsheader version'], d['original filename'], d['matrix'], \\\n d['sigref auto'], d['sigref points'], d['sigref delta'], \\\n d['sigref scan time'], d['sigref measure time'], \\\n d['sigref beam on time'], d['eps centering enabled'], \\\n d['eps enabled'], d['eps central energy'], d['eps b field'] = \\\n unpack(self._bo + 'i 256s 256s 10i', hdr.read(556))\n\n d['EPSCentralSpecies'] = self._species(hdr)\n d['EPSReferenceSpecies'] = self._species(hdr)\n\n # Don't know how long method name is, runs into null-padded zone.\n d['eps ref mass tube hv'], d['eps ref mass tube hv max var'], \\\n d['sample rotation'], d['sample rotation speed'], \\\n d['sample rotation synced'], d['sample name'], \\\n d['user name'], d['method name'] = \\\n unpack(self._bo + '2d 3i 80s 32s 256s', hdr.read(396))\n\n d['original filename'] = self._cleanup_string(d['original filename'])\n d['matrix'] = self._cleanup_string(d['matrix'])\n d['sample name'] = self._cleanup_string(d['sample name'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['method name'] = self._cleanup_string(d['method name'])\n\n d['sigref auto'] = bool(d['sigref auto'])\n d['eps centering enabled'] = bool(d['eps centering enabled'])\n d['eps enabled'] = bool(d['eps enabled'])\n d['sample rotation'] = bool(d['sample rotation'])\n d['sample rotation synced'] = bool(d['sample rotation synced'])\n d['sigref scan time'] /= 10 # 0.1 sec increments\n return d", "def skyrmion_m_field(self, pos, sign,\n sk_pos=None, sk_r=4, core=1, pi_factor=1.,\n out_skyrmion_dir=None\n ):\n\n if sk_pos is None:\n # We assume a square sized hexagonal mesh so the centre\n # is at half of every dimension\n sk_pos = self.sim.mesh.Lx * 0.5, self.sim.mesh.Ly * 0.5\n\n x = (pos[0] - sk_pos[0])\n y = (pos[1] - sk_pos[1])\n\n if np.sqrt(x ** 2 + y ** 2) <= sk_r:\n # Polar coordinates:\n r = (x ** 2 + y ** 2) ** 0.5\n phi = np.arctan2(y, x)\n # This determines the profile we want for the skyrmion\n # Single twisting: k = pi / R\n k = pi_factor * np.pi / sk_r\n\n # We define here a 'hedgehog' skyrmion pointing down\n return (sign * np.sin(k * r) * np.cos(phi),\n sign * np.sin(k * r) * np.sin(phi),\n core * np.cos(k * r))\n else:\n if not out_skyrmion_dir:\n return (0, 0, -core)\n else:\n return out_skyrmion_dir", "def get_acquisition_pars(theta=None, phi=None, shift=None, nx=None, ny=None, cfg=None):\n # ss_rect_map = {(13, 13): 1E7, (13, 14): 1E7, (13, 15): 1E7, (13, 16): 1E7, (13, 17): 1E7,\n # (14, 13): 1E7, (14, 14): 1E5, (14, 15): 1E5, (14, 16): 1E5, (14, 17): 1E7,\n # (15, 13): 1E7, (15, 14): 1E5, (15, 15): 5E4, 
(15, 16): 1E5, (15, 17): 1E7,\n # (16, 13): 1E7, (16, 14): 1E5, (16, 15): 1E5, (16, 16): 1E5, (16, 17): 1E7,\n # (17, 13): 1E7, (17, 14): 1E7, (17, 15): 1E7, (17, 16): 1E7, (17, 17): 1E7}\n nmeans_dict = {(15, 15): 1,\n(16, 15): 1,\n(16, 16): 1,\n(15, 16): 1,\n(14, 16): 1,\n(14, 15): 1,\n(14, 14): 1,\n(15, 14): 1,\n(16, 14): 1,\n(17, 14): 2,\n(17, 15): 1,\n(17, 16): 1,\n(17, 17): 2,\n(16, 17): 1,\n(15, 17): 1,\n(14, 17): 1,\n(13, 17): 1,\n(13, 16): 2,\n(13, 15): 1,\n(13, 14): 1,\n(13, 13): 5,\n(14, 13): 2,\n(15, 13): 1,\n(16, 13): 5,\n(17, 13): 5,\n(18, 13): 5,\n(18, 14): 5,\n(18, 15): 5,\n(18, 16): 5,\n(18, 17): 5,\n(18, 18): 5,\n(17, 18): 5,\n(16, 18): 2,\n(15, 18): 1,\n(14, 18): 1,\n(13, 18): 5,\n(12, 18): 5,\n(12, 17): 5,\n(12, 16): 5,\n(12, 15): 5,\n(12, 14): 5,\n(12, 13): 5,\n(12, 12): 5,\n(13, 12): 5,\n(14, 12): 5,\n(15, 12): 5,\n(16, 12): 5,\n(17, 12): 5,\n(18, 12): 5,\n(19, 12): 5,\n(19, 13): 5,\n(19, 14): 5,\n(19, 15): 5,\n(19, 16): 5,\n(19, 17): 5,\n(19, 18): 5,\n(19, 19): 5,\n(18, 19): 5,\n(17, 19): 5,\n(16, 19): 5,\n(15, 19): 5,\n(14, 19): 5,\n(13, 19): 5,\n(12, 19): 5,\n(11, 19): 5,\n(11, 18): 5,\n(11, 17): 5,\n(11, 16): 5,\n(11, 15): 5,\n(11, 14): 5,\n(11, 13): 5,\n(11, 12): 5,\n(11, 11): 5,\n(12, 11): 5,\n(13, 11): 5,\n(14, 11): 5,\n(15, 11): 5,\n(16, 11): 5,\n(17, 11): 5,\n(18, 11): 5,\n(19, 11): 5}\n\n\n\n # led_center = 15\n # led_disp = (int(cfg.array_size)+1)//2\n # led_range = range(led_center-led_disp, led_center+led_disp)\n # ledmap = product(led_range, led_range)\n #\n # ss_dict = {}\n # for led in ledmap:\n # # if led == [15, 15]:\n # # ss_dict[(led[0], led[1])] = 60E4\n # # else:\n # dist = (np.abs(led[0]-15)**2+np.abs(led[1]-15))\n # ss = 5.E5*(1+.5*dist)\n # ss_dict[(led[0], led[1])] = ss\n # if ss >3E6:\n # ss_dict[(led[0], led[1])] = 3E6\n\n power = 255\n # Camera parameters\n if nx is not None:\n # if nx == 14 or nx == 15 or nx ==16 or ny == 15 or ny ==16 or ny == 14:\n # shutter_speed = 50000\n # else:\n # shutter_speed = 500000\n # nmeans = nmeans_dict[nx, ny]\n # if [nx, ny] in [[15, 15], [15, 16], [14, 17], [14,16], [14, 15],\n # [14, 14], [13,16], [13, 15]]:\n # shutter_speed = 100000\n # nmeans = 1\n # else:\n # shutter_speed = 600000\n # nmeans = 1\n\n try:\n # shutter_speed = ss_dict[nx, ny]\n shutter_speed = 50000\n nmeans = nmeans_dict[nx, ny]\n except:\n shutter_speed = 1E5\n nmeans = 1\n return float(cfg.iso), shutter_speed, power, nmeans\n\n shutter_speed_min = cfg.shutter_speed[0]\n shutter_speed_max = cfg.shutter_speed[0]\n if phi == None:\n if shift == None:\n raise Exception(\"Must assign a value either for phi or shift.\")\n shutter_speed = translate(phi, 0, cfg.shift_max,\n shutter_speed_min, shutter_speed_max)\n else:\n shutter_speed = translate(phi, 0, 90,\n shutter_speed_min, shutter_speed_max)\n # Led parameters\n led_power = cfg.max_led_power\n return cfg.iso, shutter_speed, led_power, nmeans", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n 
self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def _nanosims_header(self, hdr):\n # Called MaskNano in OpenMIMS; BFieldTab separated out; create extra sub-dict PeakCenter\n d = {}\n d['PeakCenter'] = {}\n d['nanosimsheader version'], d['regulation mode'], d['mode'], \\\n d['grain mode'], d['semigraphic mode'], d['stage delta x'], \\\n d['stage delta y'], d['working frame width'], \\\n d['working frame height'], d['scanning frame x'], \\\n d['scanning frame width'], d['scanning frame y'], \\\n d['scanning frame height'], d['counting frame x start'], \\\n d['counting frame x end'], d['counting frame y start'], \\\n d['counting frame y end'], d['detector type'], d['electron scan'], \\\n d['scanning mode'], d['beam blanking'], \\\n d['PeakCenter']['peakcenter enabled'], d['PeakCenter']['start'], \\\n d['PeakCenter']['frequency'], d['b fields'] = \\\n unpack(self._bo + '25i', hdr.read(100))\n\n d['PeakCenter']['peakcenter enabled'] = bool(d['PeakCenter']['peakcenter enabled'])\n d['regulation mode'] = bool(d['regulation mode'])\n d['grain mode'] = bool(d['grain mode'])\n d['semigraphic mode'] = bool(d['semigraphic mode'])\n d['scanning mode'] = bool(d['scanning mode'])\n\n # Set a few extra variables.\n d['counting frame width'] = d['counting frame x end'] - d['counting frame x start'] + 1\n d['counting frame height'] = d['counting frame y end'] - d['counting frame y start'] + 1\n\n # Found in at least one version (file v11, nsHeader v8) a repeat of\n # Poly_list and this first part of nanoSIMSHeader. Total of repeat\n # adds up to 288. 
After last Poly_list, 288 byte padding zone, not all\n # null-bytes.\n hdr.seek(288, 1)\n\n # Is this the nPrintRed from OpenMIMS?\n d['print results'] = bool(unpack(self._bo + 'i', hdr.read(4))[0])\n\n d['SibCenterHor'] = self._sib_center(hdr)\n d['SibCenterVert'] = self._sib_center(hdr)\n\n # Duplicate and store these two in sub dicts\n b_field_index, has_sib_center = \\\n unpack(self._bo + '2i', hdr.read(8))\n if b_field_index < 0:\n b_field_index = None\n has_sib_center = bool(has_sib_center)\n\n d['SibCenterHor']['b field index'] = b_field_index\n d['SibCenterVert']['b field index'] = b_field_index\n d['SibCenterHor']['sib center enabled'] = has_sib_center\n d['SibCenterVert']['sib center enabled'] = has_sib_center\n\n d['EnergyCenter'] = self._energy_center(hdr)\n d['E0SCenter'] = self._e0s_center(hdr)\n\n d['EnergyCenter']['wait time'], d['presputtering raster'], \\\n d['PeakCenter']['E0P offset'], d['E0SCenter']['steps'], \\\n d['baseline measurement'], d['baseline offset'], \\\n d['baseline frequency'] = \\\n unpack(self._bo + '5i d i', hdr.read(32))\n return d", "def set_trans(self, head_mri_trans):\n x, y, z = -self.mri_origin[0]\n mri_tgt_trans = translation(x, y, z)\n head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)\n\n x, y, z = self.hsp.nasion[0]\n src_hsp_trans = translation(x, y, z)\n src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)\n\n rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])\n x, y, z = src_tgt_trans[:3, 3]\n\n self.rot_x = rot_x\n self.rot_y = rot_y\n self.rot_z = rot_z\n self.trans_x = x\n self.trans_y = y\n self.trans_z = z", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def resetAlignmentCenter(self):\n cent = self.TiltSeries_._TiltAlignmentParas.cent\n imdimX = self.TiltSeries_._imdimX\n imdimY = self.TiltSeries_._imdimY\n print(imdimX, imdimY)\n if cent[0] != imdimX//2+1 or cent[1] != imdimY//2+1:\n #rint \"Centers do not match: cent=\"+str(cent)+\", imdim=\"+str(imdim)\n self.TiltSeries_._TiltAlignmentParas.cent = [imdimX//2+1, imdimY//2+1]", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [1./self.lengthscale, 1.]\r\n self.b = [1]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] 
for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def configure(self):\n super(ProjectionMatrix, self).configure()\n if self.sensors is None:\n self.sensors = self.skin_air.sensors\n\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n self.skin_air.sensors = self.sensors\n self.skin_air.sensors_to_surface, self.skin_air.sensor_locations = self.sensors.sensors_to_surface(self.skin_air)\n\n # Create OpenMEEG objects from TVB objects.\n self.om_head = self.create_om_head()\n self.om_sources = self.create_om_sources()\n self.om_sensors = self.create_om_sensors()\n\n # Calculate based on type of sources\n if isinstance(self.sources, surfaces_module.Cortex):\n self.om_source_matrix = self.surface_source() #NOTE: ~1 hr\n elif isinstance(self.sources, connectivity_module.Connectivity):\n self.om_source_matrix = self.dipole_source()\n\n # Calculate based on type of sensors\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n self.om_head2sensor = self.head2eeg()\n elif isinstance(self.sensors, sensors_module.SensorsMEG):\n self.om_head2sensor = self.head2meg()\n if isinstance(self.sources, surfaces_module.Cortex):\n self.om_source2sensor = self.surf2meg()\n elif isinstance(self.sources, connectivity_module.Connectivity):\n self.om_source2sensor = self.dip2meg()\n\n #NOTE: ~1 hr\n self.om_inverse_head = self.inverse_head(inv_head_mat_file = \"hminv_uid\")", "def set_global(self, src_ds):\n if src_ds:\n self.description = os.path.basename(src_ds.GetDescription())\n self.driver = src_ds.GetDriver().ShortName\n self.projection = src_ds.GetProjection()\n self.transform = OrderedDict(\n zip(\n [\n \"xOrigin\",\n \"pixelWidth\",\n \"rotation_2\",\n \"yOrigin\",\n \"rotation_4\",\n \"pixelHeight\",\n ],\n src_ds.GetGeoTransform(),\n ))", "def calculateSipWcsHeader(wcs, order, bbox, spacing, header=None):\n transform = getPixelToIntermediateWorldCoords(wcs)\n crpix = wcs.getPixelOrigin()\n cdMatrix = wcs.getCdMatrix()\n crval = wcs.getSkyOrigin()\n gridNum = Extent2I(int(bbox.getWidth()/spacing + 0.5), int(bbox.getHeight()/spacing + 0.5))\n\n sip = SipApproximation(transform, crpix, cdMatrix, Box2D(bbox), gridNum, order)\n\n md = makeTanSipMetadata(sip.getPixelOrigin(), crval, sip.getCdMatrix(), sip.getA(), sip.getB(),\n sip.getAP(), sip.getBP())\n\n if header is not None:\n header.combine(md)\n else:\n header = md\n\n return header", "def set_axes(self,pdf=None,sfr=(-6,2,100),vout=(0,4,500),cs=(0,4,500),verbose=False):\n if pdf is not None:\n if verbose:\n print('Setting up from simulation PDF...')\n attrs = pdf.attrs\n u = pdf.logvout.data\n x1,x2,dbin = u.min(), u.max(), attrs['dbin']\n print(' u in ({:.1f},{:.1f}) with du = {:.2f}'.format(x1,x2,dbin))\n w = pdf.logcs.data\n x1,x2,dbin = w.min(), w.max(), attrs['dbin']\n print(' w in ({:.1f},{:.1f}) with dw = {:.2f}'.format(x1,x2,dbin))\n print(' Sigma_SFR = {:.3g},'.format(attrs['sfr']), end=' ')\n print('ZISM = {:.3g}'.format(attrs['ZISM']))\n for fl in ['Mpdf','ppdf','Epdf','Zpdf']:\n # log value of 2.e4 and 5.5e5\n T1,T2=(1.1854266752455402,1.9121660193614398)\n c=pdf[fl].sel(logcs=slice(0,T1)).sum().data*dbin**2\n i=pdf[fl].sel(logcs=slice(T1,T2)).sum().data*dbin**2\n h=pdf[fl].sel(logcs=slice(T2,4)).sum().data*dbin**2\n t=pdf[fl].sel().sum().data*dbin**2\n msg = ' {:5s}:'.format(fl)\n for ph, fph in zip(['cool','int','hot','total'],[c,i,h,t]):\n msg += ' {}={:.3f}'.format(ph,fph)\n print(msg)\n self.logvout = pdf.logvout\n self.logcs = pdf.logcs\n self.dlogvout = 
pdf.attrs['dbin']\n self.dlogcs = pdf.attrs['dbin']\n self.sfr = pdf.attrs['sfr']\n self.logsfr = np.log10(self.sfr)\n self.vout = 10.**self.logvout\n self.cs = 10.**self.logcs\n self.params['ZISM0']=pdf.attrs['ZISM']\n else:\n ranges=dict(cs=cs,vout=vout)\n if hasattr(sfr, '__len__'):\n if len(sfr) == 3:\n ranges['sfr']=sfr\n else:\n raise ValueError('sfr should either be an array/list/tuple of'+\n 'three elements (log min, log max, N), '+\n 'but len(sfr)={}'.format(len(sfr)))\n else: # scalar\n self.sfr=sfr\n if sfr>0:\n self.logsfr=np.log10(sfr)\n else:\n raise ValueError('sfr must be positive, but sfr={}'.format(sfr))\n if verbose: print('sfr={}'.format(sfr))\n\n for f in ranges:\n if len(ranges[f]) != 3:\n raise ValueError('{} should either be array-like with '.format(f)+\n 'three elements (log min, log max, N), '+\n 'but len({})={}'.format(f,len(ranges[f])))\n\n x1,x2,N = ranges[f]\n if verbose: print('{}: min={}, max={}, N={}'.format(f,x1,x2,N))\n x = np.linspace(x1,x2,N)\n x_da = xr.DataArray(x,coords=[x],dims=['log'+f])\n setattr(self,'dlog'+f,x[1]-x[0])\n setattr(self,'log'+f,getattr(x_da,'log'+f))\n setattr(self,f,10.**getattr(self,'log'+f))\n\n self.u = self.logvout\n self.w = self.logcs\n self.vBz = np.sqrt(5.0*self.cs**2+self.vout**2)\n self.Mach = 1/self.cs*self.vout", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [3./self.lengthscale**2, 2*np.sqrt(3)/self.lengthscale, 1.]\r\n self.b = [1,self.lengthscale**2/3]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def initSMParams(Q, x, y, sn):\n x = np.atleast_2d(x)\n y = np.atleast_2d(y)\n n, D = x.shape\n w = np.zeros(Q)\n m = np.zeros((D,Q))\n s = np.zeros((D,Q))\n w[:] = np.std(y) / Q\n hypinit = {\n 'cov': np.zeros(Q+2*D*Q),\n 'lik': np.atleast_1d(np.log(sn)),\n 'mean': np.array([])\n }\n\n for i in range(0,D):\n # Calculate distances\n xslice = np.atleast_2d(x[:,i])\n d2 = spat.distance.cdist(xslice, xslice, 'sqeuclidean')\n if n > 1:\n d2[d2 == 0] = d2[0,1]\n else:\n d2[d2 == 0] = 1\n minshift = np.min(np.min(np.sqrt(d2)))\n nyquist = 0.5/minshift\n m[i,:] = nyquist*np.random.ranf((1,Q))\n maxshift = np.max(np.max(np.sqrt(d2)))\n s[i,:] = 1./np.abs(maxshift*np.random.ranf((1,Q)))\n\n hypinit['cov'][0:Q] = np.log(w)\n hypinit['cov'][Q + np.arange(0,Q*D)] = np.log(m[:]).T\n hypinit['cov'][Q + Q*D + np.arange(0,Q*D)] = np.log(s[:]).T\n return hypinit", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def loadCSPAD2x2CalibParsDefault (self) :\n self.defpars = {}\n\n self.defpars['center'] = np.array( [[198., 198.],\n [ 95., 308.],\n [ 0., 0.]])\n\n self.defpars['tilt'] = np.zeros((2), dtype=np.float32)\n\n self.defpars['beam_vector'] = np.zeros((3), dtype=np.float32)\n\n self.defpars['common_mode'] = np.array([1, 100, 30])\n\n self.defpars['pedestals'] = np.zeros((185, 388, 2), dtype=np.float32)\n\n 
self.defpars['pixel_status'] = np.zeros((185, 388, 2), dtype=np.uint16)\n\n self.defpars['filter'] = np.array([1, 100, 10])", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')", "def eclipse_parameters(sat, earth, sun, time):\n\n position = earth + sat\n barycentric_e = position.at(time).observe(earth)\n barycentric_s = position.at(time).observe(sun)\n _, _, distance_to_earth = barycentric_e.radec()\n _, _, distance_to_sun = barycentric_s.radec()\n theta_e = semidiameter(earthlib.earth_radius_au, distance_to_earth.au)\n theta_s = semidiameter(0.00465, distance_to_sun.au) # Sun's average radius in AU = 0.00465\n theta = barycentric_e.separation_from(barycentric_s).radians\n return theta, theta_e, theta_s", "def __init__(self, angle = 'deg'):\n \n name = \"Spherical\"\n Qstr = [\"r\", \"theta\", \"phi\"]\n Xstr = [\"x\", \"y\", \"z\"]\n \n super().__init__(self._csSpherical_q2x, nQ = 3,\n nX = 3, name = name, \n Qstr = Qstr, Xstr = Xstr,\n maxderiv = None, isatomic = False,\n zlevel = None)\n \n if angle == 'deg' or angle == 'rad':\n self.angle = angle # 'deg' or 'rad'\n else:\n raise ValueError('angle must be ''deg'' or ''rad''.')" ]
[ "0.59060866", "0.5602356", "0.54695225", "0.531587", "0.5250702", "0.51489925", "0.512252", "0.51054573", "0.50826424", "0.50823313", "0.50728947", "0.5068519", "0.5064257", "0.50619185", "0.504671", "0.5043761", "0.5035546", "0.5006031", "0.49904823", "0.49792513", "0.49603698", "0.49561316", "0.49539453", "0.49525517", "0.49416146", "0.49367455", "0.4935392", "0.4934906", "0.4926118", "0.49192843", "0.49155524", "0.49091062", "0.49055037", "0.49045554", "0.48867851", "0.48837963", "0.48767054", "0.48752066", "0.48746902", "0.48588878", "0.48559758", "0.48304233", "0.48097807", "0.48091453", "0.48024443", "0.48015484", "0.48013803", "0.47979638", "0.4794739", "0.47941524", "0.479246", "0.4785439", "0.4783619", "0.47787234", "0.47765055", "0.47763425", "0.47677985", "0.4767235", "0.47585693", "0.47563446", "0.475508", "0.47511777", "0.47502673", "0.47434843", "0.47393236", "0.47391033", "0.47290874", "0.47206318", "0.47200334", "0.4719917", "0.47192773", "0.47135153", "0.47132868", "0.47112334", "0.47077152", "0.4707401", "0.47068062", "0.4706304", "0.4700425", "0.47003147", "0.4698477", "0.4696369", "0.4693954", "0.46931276", "0.46903518", "0.4689491", "0.46850118", "0.46831933", "0.4679959", "0.4678261", "0.46779382", "0.46777397", "0.4676083", "0.467591", "0.46752045", "0.4672301", "0.46702462", "0.46623012", "0.46621752", "0.46615595" ]
0.5937973
0
Recover the numerical values of CTF parameters from the EMAN2 CTF object stored in the header of the input image.
def get_ctf(ima):
	from EMAN2 import EMAN2Ctf
	ctf_params = ima.get_attr("ctf")
	return ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_aperture_coeffs_in_header(head):\n\n coeffs = {}\n for key, value in head.items():\n exp = '^GAMSE TRACE CHANNEL [A-Z] APERTURE \\d+ COEFF \\d+$'\n if re.match(exp, key) is not None:\n g = key.split()\n channel = g[3]\n aperture = int(g[5])\n icoeff = int(g[7])\n if (channel, aperture) not in coeffs:\n coeffs[(channel, aperture)] = []\n if len(coeffs[(channel, aperture)]) == icoeff:\n coeffs[(channel, aperture)].append(value)\n return coeffs", "def read_file(file_name):\n fits_file = fits.open(file_name)\n\n header = fits_file[0].header\n image_data = fits_file[1].data\n\n segmentation_data = fits_file[2].data\n\n header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}\n # clause to differentiate between CDELT3 and CD3_3\n\n for hdr_key, hdr_value in header_keywords.items():\n # finding required header values\n hdr_value = header[hdr_key]\n header_keywords[hdr_key] = hdr_value\n\n return header_keywords, image_data, segmentation_data", "def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned NoneType. This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)", "def extract(self,image_path):#image_path\r\n\r\n img = caffe.io.load_image(image_path)\r\n \r\n 
#image1=cv2.imread(caffe_root + 'examples/images/cat.jpg') \r\n #img=cv2.cvtColor(image1,cv2.COLOR_BGR2RGB) \r\n #img=img/255. \r\n \r\n\r\n transformed_image = self.transformer.preprocess('data', img)\r\n self.net.blobs['data'].data[...] = transformed_image\r\n ft = self.net.forward()\r\n ft = np.squeeze(ft['pool5/7x7_s1'])\r\n ft = ft / LA.norm(ft)\r\n return ft", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def __read_header(self):\n header = self.__file_object.readline()\n header_string = header.decode('utf-8')\n print(header_string)\n # Ignore first letter\n self.frame_width = int(re.findall('W\\d+', header_string)[0][1:])\n self.frame_height = int(re.findall('H\\d+', header_string)[0][1:])\n self.frame_rate = re.findall('F\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual frame rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]\n self.frame_rate = round(tokens[0] / tokens[1], 1)\n\n self.__pixel_aspect_ratio = re.findall('A\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual pixel aspect ratio rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]\n self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)\n\n # Don't ignore for interlacing\n self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]\n\n # Ignore first 
'FRAME\\n' terminator so the file object points to the first byte of raw data of the first frame\n self.__file_object.readline()\n\n self.__first_frame_raw_data_position = self.__file_object.tell()\n\n self.determine_color_space_by_frame_size()\n\n # Restore\n self.__file_object.seek(self.__first_frame_raw_data_position)\n\n return header\n\n # Color space parameter is missing?\n print('FourCC:\\t\\t', header_string[:4])\n print('Input file:\\t', self.__input_file_path)\n print('Frame size:\\t', f'{self.frame_width}x{self.frame_height}')\n print('Frame rate:\\t', f'{self.frame_rate} FPS')\n print('Aspect Ratio:\\t', self.__pixel_aspect_ratio)\n print('Color space\\t', self.color_space)\n print('Frame size (raw data):', self.__frame_raw_data_size)\n print('Position of first raw:', self.__first_frame_raw_data_position)", "def test_read_with_cclib():\n main([\"-g\", \"/tmp/fnord.Gaussian.gjf\"])\n main([\"/tmp/fnord.Gaussian.gjf\", \"data/benzene.out\"])\n assert_equals(\n open(\"data/benzene.gjf\").read(),\n \"\"\"#Put Keywords Here, check Charge and Multiplicity.\n\n data/benzene.out\n\n0 1\nC 1.7458930000 1.7957530000 -1.0597530000\nC 0.9484120000 2.8689700000 -1.4311180000\nC 1.4480470000 1.0743490000 0.0876540000\nC -0.1470660000 3.2206120000 -0.6552520000\nC 0.3525690000 1.4259910000 0.8635200000\nC -0.4449120000 2.4992080000 0.4921550000\nH 2.5997410000 1.5203090000 -1.6651660000\nH 1.1810280000 3.4311430000 -2.3262420000\nH 2.0700040000 0.2375420000 0.3781170000\nH -0.7690240000 4.0574200000 -0.9457150000\nH 0.1199530000 0.8638180000 1.7586440000\nH -1.2987600000 2.7746520000 1.0975680000\n\n\"\"\",\n )", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def read_parameters():\n hdulist1 = 
pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n param1 = hdulist1[1].data['e1'][:sample]\n param2 = hdulist1[1].data['e2'][:sample]\n weights = hdulist1[1].data['weight'][:sample]\n return param1, param2, weights", "def get_calib_from_header(header):\n\n prefix = 'HIERARCH GAMSE WLCALIB '\n\n xorder = header[prefix+'XORDER']\n yorder = header[prefix+'YORDER']\n\n coeff = np.zeros((yorder+1, xorder+1))\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n coeff[j,i] = header[prefix+'COEFF {:d} {:d}'.format(j, i)]\n\n calib = {\n 'coeff': coeff,\n 'npixel': header[prefix+'NPIXEL'],\n 'k': header[prefix+'K'],\n 'offset': header[prefix+'OFFSET'],\n 'std': header[prefix+'STDDEV'],\n 'nuse': header[prefix+'NUSE'],\n 'ntot': header[prefix+'NTOT'],\n# 'identlist': calibwindow.identlist,\n 'window_size': header[prefix+'WINDOW_SIZE'],\n 'xorder': xorder,\n 'yorder': yorder,\n 'maxiter': header[prefix+'MAXITER'],\n 'clipping': header[prefix+'CLIPPING'],\n 'q_threshold': header[prefix+'Q_THRESHOLD'],\n 'direction': header[prefix+'DIRECTION'],\n }\n return calib", "def read_fermi(self):\n E_f=None\n for line in open('OUTCAR', 'r'):\n if line.rfind('E-fermi') > -1:\n E_f=float(line.split()[2])\n return E_f", "def parse_cif(cif_name='iso.cif'):\n with open(cif_name) as f_iso:\n content = f_iso.readlines()\n u = np.zeros(6)\n for e in [line.strip().split() for line in content if len(line.strip().split()) == 2]:\n if 'cell_length_a' in e[0]:\n u[0] = float(e[1])\n elif 'cell_length_b' in e[0]:\n u[1] = float(e[1])\n elif 'cell_length_c' in e[0]:\n u[2] = float(e[1])\n elif 'cell_angle_alpha' in e[0]:\n u[3] = float(e[1])\n elif 'cell_angle_beta' in e[0]:\n u[4] = float(e[1])\n elif 'cell_angle_gamma' in e[0]:\n u[5] = float(e[1])\n a, b, c, alpha, beta, gamma = u\n cosdelta_up = np.cos(np.radians(alpha)) - np.cos(np.radians(beta))*np.cos(np.radians(gamma))\n cosdelta_low = np.sin(np.radians(beta))*np.sin(np.radians(gamma))\n cosdelta = cosdelta_up / cosdelta_low\n sindelta = np.sqrt(1-cosdelta**2)\n la = a*np.array([1.0, 0.0, 0.0])\n lb = b*np.array([np.cos(np.radians(gamma)), np.sin(np.radians(gamma)), 0.0])\n lc = c*np.array([np.cos(np.radians(beta)), np.sin(np.radians(beta))*cosdelta,\n np.sin(np.radians(beta))*sindelta])\n u_lc = lc/np.linalg.norm(lc)\n theta_c_rad = np.arccos(np.clip(np.dot(u_lc, [0, 0, 1]), -1.0, 1.0))\n return la, lb, lc, theta_c_rad", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, 
n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def processCerFile(k, fb, newcorex=None, newcorey=None, sz=0):\n\n #---- Read Event Header\n evth = unpack('{}f'.format(evthSize), fb.read(evthSize * wordSize))\n #print(evth)\n\n primary = get_primary(evth)\n energy = get_energy(evth)\n height = get_height_first(evth)\n thetaEvtH, phiEvtH = get_direction(evth)\n coreX, coreY, coreD = get_core(evth)\n\n print('{:4d} {:3d} {:9d} {:6.1f} {:8.1f} {:7.1f} {:7.1f} {:8.1f} {:5.1f} {:5.1f}'\n .format(k, int(primary), sz, energy, height, coreX, coreY, coreD, thetaEvtH, phiEvtH))\n\n return\n\n #---- Read Cherenkov photons from file\n\n wl = 999.\n i = 0\n\n while wl > 0.5:\n cphotonData = fb.read(cphotonSize * wordSize)\n \n i = i + 1\n wl, x, y, u, v, t, h = unpack('{}f'.format(cphotonSize), cphotonData)\n w = sqrt(1.0 - u ** 2 - v ** 2)\n \n if wl < 1.:\n continue\n\n wl = wl - 101000.\n\n print('{} {} {:.2f} {:.2f} {:.2f} {:.6f} {:.6f} {:.6f} {:.8f} {:.2f}'\n .format(k, i, wl, x, y, u, v, w, t, h))", "def extract_anisotropy_features (Parameters, image, mask=None):\n \n data_inputs = {}\n \n Ka, Kb, Kc = Parameters.kA, Parameters.kB, Parameters.kC\n \n \n h, w, channels = image.shape\n \n if channels == 2:\n channel_types = [\"Para\", \"Perp\"]\n elif channels == 3:\n channel_types = [\"Open\", \"Para\", \"Perp\"]\n \n \n for index, channel in enumerate(channel_types):\n \n data_inputs[channel] = np.sum(image[:,:, index])/np.count_nonzero(image[:,:, index])\n\n\n #Additional parameters\n para_value = data_inputs['Para']\n perp_value = data_inputs['Perp']\n data_inputs['AniAvg'] = (para_value - perp_value)/(para_value + 2*perp_value)\n \n #With corrections\n data_inputs['Ix'] = Ix = ((Ka+Kb)*perp_value - (Ka+Kc)*para_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['Iy'] = Iy = (Kb*para_value - Kc*perp_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['AniAvg'] = (Ix - Iy)/(Ix + 2*Iy)\n \n\n \n return (data_inputs)", "def extractNOTCAMHeader(file):\n\n try:\n\n hdulist = pyfits.open(file)\n hdulist.close() \n \n if len(hdulist) > 0:\n prihdr = hdulist[0].header\n a = ['notcam','NC',prihdr]\n for i in range(1, len(hdulist)):\n a.append(hdulist[i].header)\t\n return a \n else:\n return ['ERROR']\n\n # Error\n\n except Exception, e:\n raise HeaderException(e)", "def update_header(arr_imgs,obj,filter_i):\n \n for img in arr_imgs:\n warnings.simplefilter('ignore', category=AstropyUserWarning)\n try:\n hdulist = fits.open(img,ignore_missing_end=True)\n #if there is only a primary header get the data from it\n if len(hdulist) == 1:\n data = getdata(img, 0, header=False)\n #if there is more than one header get data from the 'SCI' extension\n else:\n data = getdata(img, 1, header=False)\n #Get value of EXPTIME and PHOTZPT keyword from primary header and \n #set CCDGAIN to a default value of 1\n EXPTIME = hdulist[0].header['EXPTIME']\n PHOTFLAM = hdulist[1].header['PHOTFLAM']\n PHOTZPT = hdulist[1].header['PHOTZPT']\n CCDGAIN = 1.0\n #First pass locating value for gain\n for i in range(2):\n if len(hdulist) == 1:\n break\n #Go through primary and secondary header and ignore the \n #BinTable formatted header\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n 
BinTableHDU):\n if 'CCDGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['CCDGAIN']\n break\n if 'GAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['GAIN']\n break\n if 'ATODGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['ATODGAIN']\n break\n \n #Locating units of image\n print('Doing BUNIT check')\n for i in range(2):\n #If there is only one header then this is the only place to \n #check\n if len(hdulist) == 1:\n bunit = hdulist[0].header['D001OUUN']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'counts':\n ### Rescaling zeropoint\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n hdulist[0].header.set('BUNIT','COUNTS/S')\n hdulist[0].header.set('MAGZPT',ZPT_NEW)\n print('BUNIT is {0}'.format(hdulist[0].\\\n header['BUNIT']))\n \n #If there are multiple headers then they all have to be checked\n else:\n if 'BUNIT' in hdulist[i].header:\n bunit = hdulist[i].header['BUNIT']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'COUNTS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n if bunit == 'ELECTRONS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN*EXPTIME) \\\n + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/(CCDGAIN*EXPTIME))*pixmod\n if bunit == 'ELECTRONS/S':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n if bunit == 'ELECTRONS/SEC':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n hdulist[i].header['BUNIT'] = 'COUNTS/S'\n hdulist[i].header['MAGZPT'] = ZPT_NEW\n ###\n print('BUNIT is {0}'.format(hdulist[i].\\\n header['BUNIT']))\n print('PHOTZPT is {0}'.format(hdulist[i].\\\n header['MAGZPT']))\n print('Done changing BUNIT')\n \n #Second pass to assign gain and exptime to headers\n for i in range(2):\n if len(hdulist) == 1:\n break\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' not in hdulist[i].header:\n hdulist[i].header.set('CCDGAIN',CCDGAIN)\n if 'EXPTIME' not in hdulist[i].header:\n hdulist[i].header.set('EXPTIME',EXPTIME)\n \n #Make new versions of images in interim/obj1 folder\n os.chdir(path_to_interim + obj)\n #Remove .fits extension\n img = os.path.splitext(img)[0]\n #If there was only one header write that header's data to new\n #version of fits image\n if len(hdulist) == 1:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[0].\\\n header,output_verify='ignore')\n #Else write the 'SCI' header's data to new version of fits image\n else:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[1].\\\n header,output_verify='ignore')\n hdulist.close()\n os.chdir(path_to_raw + obj)\n \n #This is to catch 'empty or corrupt FITS file' or any other IOError\n #and write it to a text file along with the object name and the \n #filter name\n except IOError as e:\n os.chdir('..')\n dir_path = os.getcwd()\n if os.path.basename(dir_path) == 'raw':\n os.chdir(path_to_interim)\n with open('Error_swarp.txt','a') as newfile: \n newfile.write('Object {0} and image {1} raises {2}'.\\\n format(obj,img,e))\n newfile.write('\\n')\n newfile.close()\n os.chdir(path_to_raw + obj)\n \n os.chdir(path_to_interim + obj)\n #For this object and filter combination grab all the new versions made\n arr = 
glob('*test_'+filter_i+'.fits')\n print(len(arr))\n if len(arr) >= 1: #avoid empty cases where files have been removed earlier\n #or don't exist at all since the dictionary also contains\n #pairs of objects and filters that didn't meet the swarp\n #requirements (didn't pass preliminary exptime or filter\n #checks so those folders/images don't exist)\n \n #If new versions exist then write their names to a text file \n with open(filter_i+'_img_list_testfil.txt','wb') as newfile2:\n for obj in arr:\n newfile2.write(obj)\n newfile2.write('\\n')\n newfile2.close()\n #If text file exists return the name\n return filter_i+'_img_list_testfil.txt'\n #If text file doesn't exist return this string\n return 'error'", "def read_conversions(db):\n mpart,Lbox,rsdfac,acheck = None,None,None,None\n with open(db+\"Header/attr-v2\",\"r\") as ff:\n for line in ff.readlines():\n mm = re.search(\"MassTable.*\\#HUMANE\\s+\\[\\s*0\\s+(\\d*\\.\\d*)\\s*0+\\s+0\\s+0\\s+0\\s+\\]\",line)\n if mm != None:\n mpart = float(mm.group(1)) * 1e10\n mm = re.search(\"BoxSize.*\\#HUMANE\\s+\\[\\s*(\\d+)\\s*\\]\",line)\n if mm != None:\n Lbox = float(mm.group(1))\n mm = re.search(\"RSDFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n rsdfac = float(mm.group(1))\n mm = re.search(\"ScalingFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n acheck = float(mm.group(1))\n if (mpart is None)|(Lbox is None)|(rsdfac is None)|(acheck is None):\n print(mpart,Lbox,rsdfac,acheck)\n raise RuntimeError(\"Unable to get conversions from attr-v2.\")\n return mpart, Lbox, rsdfac, acheck\n #", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop 
time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': 
{'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 
'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 
'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': 
{'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': 
{'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n 
dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError (msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return", "def bvec_errorprop(header,fld,inc,azi,err_fld,err_inc,err_azi,cc_fi,cc_fa,cc_ia):\n # Get parameters from header\n crpix1 = header['CRPIX1']\n crpix2 = header['CRPIX2']\n cdelt1 = header['CDELT1']\n cdelt2 = header['CDELT2']\n crval1 = header['CRVAL1']\n crval2 = header['CRVAL2']\n rsun_obs = header['RSUN_OBS']\t#solar disk radius in arcsec\n crota2 = header['CROTA2']\t\t#neg p-angle\n crlt_obs = header['CRLT_OBS']\t#disk center latitude\n\n nx0 = fld.shape[1]\n ny0 = fld.shape[0]\n\n # Get longitude/latitude\n xi = np.zeros((ny0,nx0))\n eta = np.zeros((ny0,nx0))\n for i in range(nx0):\n xi[:,i] = ((i + 1 - crpix1)*cdelt1 + crval1)/rsun_obs\n for j in range(ny0):\n eta[j,:] = ((j + 1 - crpix2)*cdelt2 + crval2)/rsun_obs\n\n lat,lon = img2sph(xi,eta,lonc=0.0,latc=np.radians(crlt_obs),\n asd=np.radians(rsun_obs/3.6e3),pa=np.radians(-1*crota2))\n\n latc = np.radians(crlt_obs)\n lonc = 0.0\n pAng = np.radians((-1.0) * crota2)\n\n a11 = (-np.sin(latc)*np.sin(pAng)*np.sin(lon - lonc)\n + np.cos(pAng)*np.cos(lon - lonc))\n a12 = (np.sin(latc)*np.cos(pAng)*np.sin(lon - lonc)\n + np.sin(pAng)*np.cos(lon - lonc))\n a13 = (-np.cos(latc)*np.sin(lon - lonc))\n a21 = (-np.sin(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc)\n + np.cos(pAng)*np.sin(lon - lonc))\n - np.cos(lat)*np.cos(latc)*np.sin(pAng))\n a22 = (np.sin(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc)\n - np.sin(pAng)*np.sin(lon - lonc))\n + np.cos(lat)*np.cos(latc)*np.cos(pAng))\n a23 = (-np.cos(latc)*np.sin(lat)*np.cos(lon - lonc)\n + np.sin(latc)*np.cos(lat))\n a31 = (np.cos(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc)\n + np.cos(pAng)*np.sin(lon - lonc))\n - np.sin(lat)*np.cos(latc)*np.sin(pAng))\n a32 = (-np.cos(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc)\n - np.sin(pAng)*np.sin(lon - lonc))\n + np.sin(lat)*np.cos(latc)*np.cos(pAng))\n a33 = (np.cos(lat)*np.cos(latc)*np.cos(lon - lonc)\n + np.sin(lat)*np.sin(latc))\n\n # Sine/cosine\n sin_inc = np.sin(inc)\n cos_inc = np.cos(inc)\n sin_azi = np.sin(azi)\n cos_azi = np.cos(azi)\n\n # Covariance\n var_fld = err_fld * err_fld\n var_inc = err_inc * err_inc\n var_azi = err_azi * err_azi\n cov_fi = err_fld * err_inc * cc_fi\n cov_fa = err_fld * err_azi * cc_fa\n cov_ia = err_inc * err_azi * cc_ia\n\n # Partial derivatives\n dBp_dfld = (-a11*sin_inc*sin_azi + a12*sin_inc*cos_azi + a13*cos_inc)\n dBp_dinc = (-a11*cos_inc*sin_azi + a12*cos_inc*cos_azi - a13*sin_inc)*fld\n dBp_dazi = (-a11*sin_inc*cos_azi - a12*sin_inc*sin_azi)*fld\n\n dBt_dfld = (-a21*sin_inc*sin_azi + a22*sin_inc*cos_azi + a23*cos_inc)*(-1)\n dBt_dinc = (-a21*cos_inc*sin_azi + a22*cos_inc*cos_azi - a23*sin_inc)*fld*(-1)\n dBt_dazi = (-a21*sin_inc*cos_azi - a22*sin_inc*sin_azi)*fld*(-1)\n\n dBr_dfld = (-a31*sin_inc*sin_azi + a32*sin_inc*cos_azi + a33*cos_inc)\n dBr_dinc = (-a31*cos_inc*sin_azi + a32*cos_inc*cos_azi - 
a33*sin_inc)*fld\n dBr_dazi = (-a31*sin_inc*cos_azi - a32*sin_inc*sin_azi)*fld\n\n # Final variances\n var_bp = (dBp_dfld*dBp_dfld*var_fld\n + dBp_dinc*dBp_dinc*var_inc\n + dBp_dazi*dBp_dazi*var_azi\n + 2*dBp_dfld*dBp_dinc*cov_fi\n + 2*dBp_dfld*dBp_dazi*cov_fa\n + 2*dBp_dinc*dBp_dazi*cov_ia)\n\n var_bt = (dBt_dfld*dBt_dfld*var_fld\n + dBt_dinc*dBt_dinc*var_inc\n + dBt_dazi*dBt_dazi*var_azi\n + 2*dBt_dfld*dBt_dinc*cov_fi\n + 2*dBt_dfld*dBt_dazi*cov_fa\n + 2*dBt_dinc*dBt_dazi*cov_ia)\n\n var_br = (dBr_dfld*dBr_dfld*var_fld\n + dBr_dinc*dBr_dinc*var_inc\n + dBr_dazi*dBr_dazi*var_azi\n + 2*dBr_dfld*dBr_dinc*cov_fi\n + 2*dBr_dfld*dBr_dazi*cov_fa\n + 2*dBr_dinc*dBr_dazi*cov_ia)\n\n return var_bp,var_bt,var_br", "def read_conversions(db):\n mpart,Lbox,rsdfac,acheck = None,None,None,None\n with open(db+\"Header/attr-v2\",\"r\") as ff:\n for line in ff.readlines():\n mm = re.search(\"MassTable.*\\#HUMANE\\s+\\[\\s*0\\s+(\\d*\\.\\d*)\\s*0+\\s+0\\s+0\\s+0\\s+\\]\",line)\n if mm != None:\n mpart = float(mm.group(1)) * 1e10\n mm = re.search(\"BoxSize.*\\#HUMANE\\s+\\[\\s*(\\d+)\\s*\\]\",line)\n if mm != None:\n Lbox = float(mm.group(1))\n mm = re.search(\"RSDFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n rsdfac = float(mm.group(1))\n mm = re.search(\"ScalingFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n acheck = float(mm.group(1))\n if (mpart is None)|(Lbox is None)|(rsdfac is None)|(acheck is None):\n print(mpart,Lbox,rsdfac,acheck)\n raise RuntimeError(\"Unable to get conversions from attr-v2.\")\n if np.abs(acheck-aa)>1e-4:\n raise RuntimeError(\"Read a={:f}, expecting {:f}.\".format(acheck,aa))\n return(rsdfac)\n #", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file", "def getCl(filename):\n powSpec = pf.getdata(filename,1)\n temps = powSpec.field('TEMPERATURE')\n ell = np.arange(temps.size)\n return ell,temps", "def test_reads_photomodeler_sigmas_from_report() -> None:\n imgsz = (4288, 2848)\n sigmas = {\n \"focal\": 0.001,\n \"xp\": 0.001,\n \"yp\": 7.1e-004,\n \"fw\": 1.7e-004,\n \"fh\": 0.0,\n \"k1\": 2.0e-007,\n \"k2\": 1.2e-009,\n \"k3\": 0.0,\n \"p1\": 3.5e-007,\n \"p2\": 0.0,\n }\n path = os.path.join(\"tests\", \"CalibrationReport.txt\")\n xcam_auto = PhotoModeler.from_report(path, imgsz=imgsz, sigmas=True)\n xcam_manual = PhotoModeler(imgsz=imgsz, **sigmas)\n assert vars(xcam_auto) == vars(xcam_manual)", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n 
flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def generate_ctf(p):\n\tfrom EMAN2 import EMAN2Ctf\n\n\tdefocus = p[0]\n\tcs = p[1]\n\tvoltage = p[2]\n\tpixel_size = p[3]\n\tbfactor = p[4]\n\tamp_contrast = p[5]\n\t\n\tif defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention\n\t\tdefocus *= 1e-4\n\t\n\tif amp_contrast < 1.0:\n\t\tfrom math import sqrt\n\t\tamp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)\n\n\tctf = EMAN2Ctf()\n\tif(len(p) == 6):\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast})\n\telse:\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast,'dfdiff':p[6],'dfang':p[7]})\n\t\t\n\treturn ctf", "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d", "def read_camera_params(h5_dataset):\n fx = h5_dataset[0]\n fy = h5_dataset[1]\n skew = h5_dataset[2]\n cx = h5_dataset[3]\n cy = h5_dataset[4]\n K = np.array([[fx, skew, cx],\n [0, fy, cy],\n [0, 0, 1]], dtype=np.float64)\n R = np.array([[h5_dataset[5], h5_dataset[8], h5_dataset[11]],\n [h5_dataset[6], h5_dataset[9], h5_dataset[12]],\n [h5_dataset[7], h5_dataset[10], h5_dataset[13]]], dtype=np.float64)\n t = np.array([h5_dataset[14], h5_dataset[15], h5_dataset[16]], dtype=np.float64)\n return K, R, t", "def f2c_file_read_function():\n with open('data.txt', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n F = float(data[-1][-1]) # last item in data should be value\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def _get_econt_info(self, out_log):\n f = open_general(out_log)\n tmptxt = f.readlines()\n f.close()\n econt = {}\n itmp = search_string('[read_energy] number of energy points', tmptxt)\n if itmp>=0: econt['Nepts'] = int(tmptxt.pop(itmp).split()[-1])\n itmp = search_string('energies and weights are:', tmptxt)\n if itmp>=0:\n tmp = []\n for ie in range(econt['Nepts']):\n tmpline = tmptxt[itmp+4+ie].split()[1:]\n tmp.append([float(tmpline[0]), float(tmpline[1]), float(tmpline[2]), float(tmpline[3])])\n tmp = array(tmp)\n econt['epts'] = tmp[:,:2]\n econt['weights'] = tmp[:,2:]\n econt['emin'] = tmp[0,0]\n return econt", "def _readHeader(self):\n 
self.ControllerVersion = self._readInt(0)\n self.LogicOutput = self._readInt(2)\n self.AppHiCapLowNoise = self._readInt(4)\n self.TimingMode = self._readInt(8)\n self.Exposure = self._readFloat(10)\n self.DetTemperature = self._readFloat(36)\n self.DetectorType = self._readInt(40)\n self.TriggerDiode = self._readInt(44)\n self.DelayTime = self._readFloat(46)\n self.ShutterControl = self._readInt(50)\n self.AbsorbLive = self._readInt(52)\n self.AbsorbMode = self._readInt(54)\n self.CanDoVirtualChip = self._readInt(56)\n self.ThresholdMinLive = self._readInt(58)\n self.ThresholdMin = self._readFloat(60)\n self.ThresholdMaxLive = self._readInt(64)\n self.ThresholdMax = self._readFloat(66)\n self.ADCOffset = self._readInt(188)\n self.ADCRate = self._readInt(190)\n self.ADCType = self._readInt(192)\n self.ADCRes = self._readInt(194)\n self.ADCBitAdj = self._readInt(196)\n self.Gain = self._readInt(198)\n self.GeometricOps = self._readInt(600)", "def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'", "def calcFeatureDescr(covarianceMatrix):\n D, V = scplinag.eig(covarianceMatrix)\n # We sort the array with eigenvalues by size (from smallest to largest value)\n D.sort()\n # Get eigenvectors\n e1 = V[2] # eigenvector in direction of largest variance\n e2 = V[1] # second eigenvector, perpend. 
to e1\n e3 = V[0]\n # Find the eigenvalues\n evalue1 = D[2] # largest\n evalue2 = D[1]\n evalue3 = D[0] # smallest\n\n # Linearity\n lambda1 = (evalue1 - evalue2) / evalue1\n # Planarity\n lambda2 = (evalue2 - evalue3) / evalue1\n # Scattering\n lambda3 = evalue3 / evalue1\n # Omnivariance\n misc1 = np.prod(D)\n lambda4 = pow(misc1,(1.0/3))\n # Anisotropy\n lambda5 = (evalue1 - evalue3) / evalue1\n # Eigentropy\n s = 0\n count = 0\n for elem in D:\n if elem == 0:\n s = 0\n count = 1\n else:\n # Only if bigger than 0\n misc2 = (elem*np.log(elem))\n if misc2 == 0:\n print \"Multiplication result too close to zero.\"\n s = 0\n else:\n s = s + misc2\n lambda6 = (-1)*s \n # Sum of eigenvalues\n lambda7 = sum(D)\n # Change of curvature\n lambda8 = evalue3/sum(D)\n \n featureDescriptor = np.array([lambda1, lambda2, lambda3, lambda4, lambda5, lambda6, lambda7, lambda8])\n return featureDescriptor, count", "def read_spe(spefilename, verbose=False):\n \n # open SPE file as binary input\n spe = open(spefilename, \"rb\")\n \n # Header length is a fixed number\n nBytesInHeader = 4100\n\n # Read the entire header\n header = spe.read(nBytesInHeader)\n \n # version of WinView used\n swversion = struct.unpack_from(\"16s\", header, offset=688)[0]\n \n # version of header used\n # Eventually, need to adjust the header unpacking\n # based on the headerVersion. \n headerVersion = struct.unpack_from(\"f\", header, offset=1992)[0]\n \n # which camera controller was used?\n controllerVersion = struct.unpack_from(\"h\", header, offset=0)[0]\n if verbose:\n print (\"swversion = \", swversion)\n print (\"headerVersion = \", headerVersion)\n print (\"controllerVersion = \", controllerVersion)\n \n # Date of the observation\n # (format is DDMONYYYY e.g. 27Jan2009)\n date = struct.unpack_from(\"9s\", header, offset=20)[0]\n \n # Exposure time (float)\n exp_sec = struct.unpack_from(\"f\", header, offset=10)[0]\n \n # Intensifier gain\n pimaxGain = struct.unpack_from(\"h\", header, offset=148)[0]\n\n # Not sure which \"gain\" this is\n gain = struct.unpack_from(\"H\", header, offset=198)[0]\n \n # Data type (0=float, 1=long integer, 2=integer, 3=unsigned int)\n data_type = struct.unpack_from(\"h\", header, offset=108)[0]\n\n comments = struct.unpack_from(\"400s\", header, offset=200)[0]\n\n # CCD Chip Temperature (Degrees C)\n detectorTemperature = struct.unpack_from(\"f\", header, offset=36)[0]\n\n # The following get read but are not used\n # (this part is only lightly tested...)\n analogGain = struct.unpack_from(\"h\", header, offset=4092)[0]\n noscan = struct.unpack_from(\"h\", header, offset=34)[0]\n pimaxUsed = struct.unpack_from(\"h\", header, offset=144)[0]\n pimaxMode = struct.unpack_from(\"h\", header, offset=146)[0]\n\n ########### here's from Kasey\n #int avgexp 2 number of accumulations per scan (why don't they call this \"accumulations\"?)\n #TODO: this isn't actually accumulations, so fix it... 
\n accumulations = struct.unpack_from(\"h\", header, offset=668)[0]\n if accumulations == -1:\n # if > 32767, set to -1 and \n # see lavgexp below (668) \n #accumulations = struct.unpack_from(\"l\", header, offset=668)[0]\n # or should it be DWORD, NumExpAccums (1422): Number of Time experiment accumulated \n accumulations = struct.unpack_from(\"l\", header, offset=1422)[0]\n \n \"\"\"Start of X Calibration Structure (although I added things to it that I thought were relevant,\n like the center wavelength...\"\"\"\n xcalib = {}\n \n #SHORT SpecAutoSpectroMode 70 T/F Spectrograph Used\n xcalib['SpecAutoSpectroMode'] = bool( struct.unpack_from(\"h\", header, offset=70)[0] )\n\n #float SpecCenterWlNm # 72 Center Wavelength in Nm\n xcalib['SpecCenterWlNm'] = struct.unpack_from(\"f\", header, offset=72)[0]\n \n #SHORT SpecGlueFlag 76 T/F File is Glued\n xcalib['SpecGlueFlag'] = bool( struct.unpack_from(\"h\", header, offset=76)[0] )\n\n #float SpecGlueStartWlNm 78 Starting Wavelength in Nm\n xcalib['SpecGlueStartWlNm'] = struct.unpack_from(\"f\", header, offset=78)[0]\n\n #float SpecGlueEndWlNm 82 Starting Wavelength in Nm\n xcalib['SpecGlueEndWlNm'] = struct.unpack_from(\"f\", header, offset=82)[0]\n\n #float SpecGlueMinOvrlpNm 86 Minimum Overlap in Nm\n xcalib['SpecGlueMinOvrlpNm'] = struct.unpack_from(\"f\", header, offset=86)[0]\n\n #float SpecGlueFinalResNm 90 Final Resolution in Nm\n xcalib['SpecGlueFinalResNm'] = struct.unpack_from(\"f\", header, offset=90)[0]\n\n # short BackGrndApplied 150 1 if background subtraction done\n xcalib['BackgroundApplied'] = struct.unpack_from(\"h\", header, offset=150)[0]\n BackgroundApplied=False\n if xcalib['BackgroundApplied']==1: BackgroundApplied=True\n\n # float SpecGrooves 650 Spectrograph Grating Grooves\n xcalib['SpecGrooves'] = struct.unpack_from(\"f\", header, offset=650)[0]\n\n # short flatFieldApplied 706 1 if flat field was applied.\n xcalib['flatFieldApplied'] = struct.unpack_from(\"h\", header, offset=706)[0]\n flatFieldApplied=False\n if xcalib['flatFieldApplied']==1: flatFieldApplied=True\n \n #double offset # 3000 offset for absolute data scaling */\n xcalib['offset'] = struct.unpack_from(\"d\", header, offset=3000)[0]\n\n #double factor # 3008 factor for absolute data scaling */\n xcalib['factor'] = struct.unpack_from(\"d\", header, offset=3008)[0]\n \n #char current_unit # 3016 selected scaling unit */\n xcalib['current_unit'] = struct.unpack_from(\"c\", header, offset=3016)[0]\n\n #char reserved1 # 3017 reserved */\n xcalib['reserved1'] = struct.unpack_from(\"c\", header, offset=3017)[0]\n\n #char string[40] # 3018 special string for scaling */\n xcalib['string'] = struct.unpack_from(\"40c\", header, offset=3018)\n \n #char reserved2[40] # 3058 reserved */\n xcalib['reserved2'] = struct.unpack_from(\"40c\", header, offset=3058)\n\n #char calib_valid # 3098 flag if calibration is valid */\n xcalib['calib_valid'] = struct.unpack_from(\"c\", header, offset=3098)[0]\n\n #char input_unit # 3099 current input units for */\n xcalib['input_unit'] = struct.unpack_from(\"c\", header, offset=3099)[0]\n \"\"\"/* \"calib_value\" */\"\"\"\n\n #char polynom_unit # 3100 linear UNIT and used */\n xcalib['polynom_unit'] = struct.unpack_from(\"c\", header, offset=3100)[0]\n \"\"\"/* in the \"polynom_coeff\" */\"\"\"\n\n #char polynom_order # 3101 ORDER of calibration POLYNOM */\n xcalib['polynom_order'] = struct.unpack_from(\"c\", header, offset=3101)[0]\n\n #char calib_count # 3102 valid calibration data pairs */\n xcalib['calib_count'] = 
struct.unpack_from(\"c\", header, offset=3102)[0]\n\n #double pixel_position[10];/* 3103 pixel pos. of calibration data */\n xcalib['pixel_position'] = struct.unpack_from(\"10d\", header, offset=3103)\n\n #double calib_value[10] # 3183 calibration VALUE at above pos */\n xcalib['calib_value'] = struct.unpack_from(\"10d\", header, offset=3183)\n\n #double polynom_coeff[6] # 3263 polynom COEFFICIENTS */\n xcalib['polynom_coeff'] = struct.unpack_from(\"6d\", header, offset=3263)\n\n #double laser_position # 3311 laser wavenumber for relativ WN */\n xcalib['laser_position'] = struct.unpack_from(\"d\", header, offset=3311)[0]\n\n #char reserved3 # 3319 reserved */\n xcalib['reserved3'] = struct.unpack_from(\"c\", header, offset=3319)[0]\n\n #unsigned char new_calib_flag # 3320 If set to 200, valid label below */\n #xcalib['calib_value'] = struct.unpack_from(\"BYTE\", header, offset=3320)[0] # how to do this?\n\n #char calib_label[81] # 3321 Calibration label (NULL term'd) */\n xcalib['calib_label'] = struct.unpack_from(\"81c\", header, offset=3321)\n\n #char expansion[87] # 3402 Calibration Expansion area */\n xcalib['expansion'] = struct.unpack_from(\"87c\", header, offset=3402)\n ########### end of Kasey's addition\n\n if verbose:\n print (\"date = [\"+date+\"]\")\n print (\"exp_sec = \", exp_sec)\n print (\"pimaxGain = \", pimaxGain)\n print (\"gain (?) = \", gain)\n print (\"data_type = \", data_type)\n print (\"comments = [\"+comments+\"]\")\n print (\"analogGain = \", analogGain)\n print (\"noscan = \", noscan)\n print (\"detectorTemperature [C] = \", detectorTemperature)\n print (\"pimaxUsed = \", pimaxUsed)\n\n # Determine the data type format string for\n # upcoming struct.unpack_from() calls\n if data_type == 0:\n # float (4 bytes)\n dataTypeStr = \"f\" #untested\n bytesPerPixel = 4\n dtype = \"float32\"\n elif data_type == 1:\n # long (4 bytes)\n dataTypeStr = \"l\" #untested\n bytesPerPixel = 4\n dtype = \"int32\"\n elif data_type == 2:\n # short (2 bytes)\n dataTypeStr = \"h\" #untested\n bytesPerPixel = 2\n dtype = \"int32\"\n elif data_type == 3: \n # unsigned short (2 bytes)\n dataTypeStr = \"H\" # 16 bits in python on intel mac\n bytesPerPixel = 2\n dtype = \"int32\" # for numpy.array().\n # other options include:\n # IntN, UintN, where N = 8,16,32 or 64\n # and Float32, Float64, Complex64, Complex128\n # but need to verify that pyfits._ImageBaseHDU.ImgCode cna handle it\n # right now, ImgCode must be float32, float64, int16, int32, int64 or uint8\n else:\n print (\"unknown data type\")\n print (\"returning...\")\n sys.exit()\n \n # Number of pixels on x-axis and y-axis\n nx = struct.unpack_from(\"H\", header, offset=42)[0]\n ny = struct.unpack_from(\"H\", header, offset=656)[0]\n \n # Number of image frames in this SPE file\n nframes = struct.unpack_from(\"l\", header, offset=1446)[0]\n\n if verbose:\n print (\"nx, ny, nframes = \", nx, \", \", ny, \", \", nframes)\n \n npixels = nx*ny\n npixStr = str(npixels)\n fmtStr = npixStr+dataTypeStr\n if verbose:\n print (\"fmtStr = \", fmtStr)\n \n # How many bytes per image?\n nbytesPerFrame = npixels*bytesPerPixel\n if verbose:\n print (\"nbytesPerFrame = \", nbytesPerFrame)\n\n # Create a dictionary that holds some header information\n # and contains a placeholder for the image data\n spedict = {'data':[], # can have more than one image frame per SPE file\n 'IGAIN':pimaxGain,\n 'EXPOSURE':exp_sec,\n 'SPEFNAME':spefilename,\n 'OBSDATE':date,\n 'CHIPTEMP':detectorTemperature,\n 'COMMENTS':comments,\n 'XCALIB':xcalib,\n 
'ACCUMULATIONS':accumulations,\n 'FLATFIELD':flatFieldApplied,\n 'BACKGROUND':BackgroundApplied\n }\n \n # Now read in the image data\n # Loop over each image frame in the image\n if verbose:\n print (\"Reading image frames number \"),\n for ii in range(nframes):\n iistr = str(ii)\n data = spe.read(nbytesPerFrame)\n if verbose:\n print (iistr,\" \",)\n \n # read pixel values into a 1-D numpy array. the \"=\" forces it to use\n # standard python datatype size (4bytes for 'l') rather than native\n # (which on 64bit is 8bytes for 'l', for example).\n # See http://docs.python.org/library/struct.html\n dataArr = np.array(struct.unpack_from(\"=\"+fmtStr, data, offset=0),\n dtype=dtype)\n\n # Resize array to nx by ny pixels\n # notice order... (y,x)\n dataArr.resize((ny, nx))\n #print dataArr.shape\n\n # Push this image frame data onto the end of the list of images\n # but first cast the datatype to float (if it's not already)\n # this isn't necessary, but shouldn't hurt and could save me\n # from doing integer math when i really meant floating-point...\n spedict['data'].append( dataArr.astype(float) )\n\n if verbose:\n print (\"\")\n \n return spedict", "def pt3_reader(filename):\n with open(filename, 'rb') as f:\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Binary file header\n header_dtype = np.dtype([\n ('Ident', 'S16' ),\n ('FormatVersion', 'S6' ),\n ('CreatorName', 'S18' ),\n ('CreatorVersion', 'S12' ),\n ('FileTime', 'S18' ),\n ('CRLF', 'S2' ),\n ('Comment', 'S256' ),\n ('NumberOfCurves', 'int32' ),\n ('BitsPerRecord', 'int32' ), # bits in each T3 record\n ('RoutingChannels', 'int32' ),\n ('NumberOfBoards', 'int32' ),\n ('ActiveCurve', 'int32' ),\n ('MeasurementMode', 'int32' ),\n ('SubMode', 'int32' ),\n ('RangeNo', 'int32' ),\n ('Offset', 'int32' ),\n ('AcquisitionTime', 'int32' ), # in ms\n ('StopAt', 'uint32'),\n ('StopOnOvfl', 'int32' ),\n ('Restart', 'int32' ),\n ('DispLinLog', 'int32' ),\n ('DispTimeAxisFrom', 'int32' ),\n ('DispTimeAxisTo', 'int32' ),\n ('DispCountAxisFrom', 'int32' ),\n ('DispCountAxisTo', 'int32' ),\n ])\n header = np.fromfile(f, dtype=header_dtype, count=1)\n\n if header['FormatVersion'][0] != b'2.0':\n raise IOError((\"Format '%s' not supported. 
\"\n \"Only valid format is '2.0'.\") % \\\n header['FormatVersion'][0])\n\n dispcurve_dtype = np.dtype([\n ('DispCurveMapTo', 'int32'),\n ('DispCurveShow', 'int32')])\n dispcurve = np.fromfile(f, dispcurve_dtype, count=8)\n\n params_dtype = np.dtype([\n ('ParamStart', 'f4'),\n ('ParamStep', 'f4'),\n ('ParamEnd', 'f4')])\n params = np.fromfile(f, params_dtype, count=3)\n\n repeat_dtype = np.dtype([\n ('RepeatMode', 'int32'),\n ('RepeatsPerCurve', 'int32'),\n ('RepeatTime', 'int32'),\n ('RepeatWaitTime', 'int32'),\n ('ScriptName', 'S20' )])\n repeatgroup = np.fromfile(f, repeat_dtype, count=1)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Hardware information header\n hw_dtype = np.dtype([\n ('HardwareIdent', 'S16' ),\n ('HardwarePartNo', 'S8' ),\n ('HardwareSerial', 'int32'),\n ('SyncDivider', 'int32'),\n ('CFDZeroCross0', 'int32'),\n ('CFDLevel0', 'int32'),\n ('CFDZeroCross1', 'int32'),\n ('CFDLevel1', 'int32'),\n ('Resolution', 'f4'),\n ('RouterModelCode', 'int32'),\n ('RouterEnabled', 'int32')])\n hardware = np.fromfile(f, hw_dtype, count=1)\n\n rtr_dtype = np.dtype([\n ('InputType', 'int32'),\n ('InputLevel', 'int32'),\n ('InputEdge', 'int32'),\n ('CFDPresent', 'int32'),\n ('CFDLevel', 'int32'),\n ('CFDZCross', 'int32')])\n router = np.fromfile(f, rtr_dtype, count=4)\n\n # Time tagging mode specific header\n ttmode_dtype = np.dtype([\n ('ExtDevices', 'int32' ),\n ('Reserved1', 'int32' ),\n ('Reserved2', 'int32' ),\n ('InpRate0', 'int32' ),\n ('InpRate1', 'int32' ),\n ('StopAfter', 'int32' ),\n ('StopReason', 'int32' ),\n ('nRecords', 'int32' ),\n ('ImgHdrSize', 'int32')])\n ttmode = np.fromfile(f, ttmode_dtype, count=1)\n\n # Special header for imaging. How many of the following ImgHdr\n # array elements are actually present in the file is indicated by\n # ImgHdrSize above.\n ImgHdr = np.fromfile(f, dtype='int32', count=ttmode['ImgHdrSize'][0])\n\n # The remainings are all T3 records\n t3records = np.fromfile(f, dtype='uint32', count=ttmode['nRecords'][0])\n\n timestamps_unit = 1./ttmode['InpRate0']\n nanotimes_unit = 1e-9*hardware['Resolution']\n\n metadata = dict(header=header, dispcurve=dispcurve, params=params,\n repeatgroup=repeatgroup, hardware=hardware,\n router=router, ttmode=ttmode, imghdr=ImgHdr)\n return t3records, timestamps_unit, nanotimes_unit, metadata", "def get_features(img1,mask1, depth1):\n colors = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)\n img3 = img1.copy()\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img1 = clahe.apply(img1) # Applying Clahe\n kp, des = orb.detectAndCompute(img1, mask=mask1) # Computing ORB features\n kp_pts = np.float32([ kp[m].pt for m in range(len(kp))]).reshape(-1,2)\n # Getting Colors\n col = []\n for i in range(len(kp)):\n col.append(colors[kp_pts[i,1].astype(int), kp_pts[i,0].astype(int)])\n col = np.array(col)\n # Getting 2D points\n kp_2d = []\n for m in range(len(kp)):\n kp_2d.append([int(kp[m].pt[0]), int(kp[m].pt[1])])\n kp_2d = np.array(kp_2d).reshape(-1,2)\n \n # Getting the 3D points\n kp_3d, _, _ = convert_3d(kp_2d, depth1, img3)\n \n # Removing points with Zero depth\n my_ind = np.where(kp_3d[:,2]!=0)[0]\n new_kp_3d = kp_3d[my_ind,:]\n new_kp_2d = kp_2d[my_ind,:]\n new_des = des[my_ind,:]\n new_col = col[my_ind,:]\n \n # Removing the duplicates\n uni_3d = np.unique(new_kp_3d, return_index= True, axis=0)[1]\n new_kp_3d1 = new_kp_3d[uni_3d,:]\n new_kp_2d1 = new_kp_2d[uni_3d,:]\n new_des1 = new_des[uni_3d,:]\n new_col1 = new_col[uni_3d,:]\n return kp_3d, kp_2d, des, col", "def _read_coefficients(self):\r\n 
coeff = self._read_register(_BME280_REGISTER_DIG_T1, 24)\r\n coeff = list(struct.unpack('<HhhHhhhhhhhh', bytes(coeff)))\r\n coeff = [float(i) for i in coeff]\r\n self._temp_calib = coeff[:3]\r\n self._pressure_calib = coeff[3:]\r\n\r\n self._humidity_calib = [0]*6\r\n self._humidity_calib[0] = self._read_byte(_BME280_REGISTER_DIG_H1)\r\n coeff = self._read_register(_BME280_REGISTER_DIG_H2, 7)\r\n coeff = list(struct.unpack('<hBBBBb', bytes(coeff)))\r\n self._humidity_calib[1] = float(coeff[0])\r\n self._humidity_calib[2] = float(coeff[1])\r\n self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))\r\n self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))\r\n self._humidity_calib[5] = float(coeff[5])", "def read_flt(input_file):\n\n if input_file.endswith('.flt') or input_file.endswith('.hdr'):\n input_file = input_file[:-4]\n else:\n print 'Incorrect filename'\n return 0,0 #exits module gracefully\n\n headers = read_headers(input_file)\n\n #read the data as a 1D array and reshape it to the dimensions in the header\n raster_array = read_bin(input_file).reshape(int(headers[1]), int(headers[0]))\n raster_array = raster_array.reshape(int(headers[1]), int(headers[0])) #rows, columns\n\n return raster_array, headers", "def eff_param():\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)", "def __getPressureCalibrationCoefficients(self):\n src13 = self.read_byte_data(self.address, 0x13)\n src14 = self.read_byte_data(self.address, 0x14)\n src15 = self.read_byte_data(self.address, 0x15)\n src16 = self.read_byte_data(self.address, 0x16)\n src17 = self.read_byte_data(self.address, 0x17)\n src18 = self.read_byte_data(self.address, 0x18)\n src19 = self.read_byte_data(self.address, 0x19)\n src1A = self.read_byte_data(self.address, 0x1A)\n src1B = self.read_byte_data(self.address, 0x1B)\n src1C = self.read_byte_data(self.address, 0x1C)\n src1D = self.read_byte_data(self.address, 0x1D)\n src1E = self.read_byte_data(self.address, 0x1E)\n src1F = self.read_byte_data(self.address, 0x1F)\n src20 = self.read_byte_data(self.address, 0x20)\n src21 = self.read_byte_data(self.address, 0x21)\n c00 = (src13 << 12) | (src14 << 4) | (src15 >> 4)\n c00 = getTwosComplement(c00, 20)\n c10 = ((src15 & 0x0F) << 16) | (src16 << 8) | src17\n c10 = getTwosComplement(c10, 20)\n c20 = (src1C << 8) | src1D\n c20 = getTwosComplement(c20, 16)\n c30 = (src20 << 8) | src21\n c30 = getTwosComplement(c30, 16)\n c01 = (src18 << 8) | src19\n c01 = getTwosComplement(c01, 16)\n c11 = (src1A << 8) | src1B\n c11 = getTwosComplement(c11, 16)\n c21 = (src1E < 8) | src1F\n c21 = getTwosComplement(c21, 16)\n return c00, c10, c20, c30, c01, c11, c21", "def preprocess_image(img):\n return (img.astype(np.float32)/255.0 - FACENET_MEAN) / FACENET_STD", "def Read_CCD_image(Path):\n fs = open(Path, 'r')\n \n #Compte le nombre de lignes, oblige pr le moment de tout lire\n # la dernière ligne est vide ! 
attention, j'initialise nb_line à -1 pour compenser\n nb_line = -1\n while 1: \n txt = fs.readline()\n nb_line = nb_line+1\n if ((txt =='')|(txt == '\\r\\n')): \n break\n fs.close()\n \n \n # je lis une ligne, compte le nombre d'espace et en deduit le nombre de colonne de la matrice\n fs = open(Path, 'r')\n txt = fs.readline()\n ii = 0\n index_line = []\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1 \n if (txt[ii:ii+1] == '\\t'):\n index_line.append(ii)\n if (txt[ii:ii+4] == '\\r\\n'):\n break\n nb_col = np.array(index_line).size\n fs.close()\n \n image = np.ones((nb_line,nb_col), dtype = float) # Create the image matrix\n # Pour les axes, je reprends les chiffres obtenus lors de la calibration du mouvement de la pointe.... cad 31nm/pixel...\n #axex = np.linspace(0,0.032*nb_line,nb_line) #microns\n #axey = np.linspace(0,0.032*nb_col,nb_col) #microns\n axex = np.linspace(0,nb_line,nb_line) #pixels\n axey = np.linspace(0,nb_col,nb_col) #pixels\n \n fs = open(Path, 'r')\n \n nb_line = 0 # I need to count the lines to fill the matrix\n while 1: \n txt = fs.readline()\n if ((txt =='')|(txt == '\\r\\n')): \n break\n if txt[0] =='#':\n pass\n else:\n #print(txt)\n ii=-1\n index_line=[]\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1 \n if (txt[ii:ii+1] == '\\t'):\n index_line.append(ii)\n if (txt[ii:ii+4] == '\\r\\n'):\n break\n # ici j'ai tous mes index d'espace pour une ligne normalement\n line = []\n line.append(txt[:index_line[0]])\n index_line = np.array(index_line) # premier nombre\n for ii in range (index_line.size -1):\n line.append(np.float(txt[index_line[ii]:index_line[ii+1]]))\n # Il me manque le dernier aussi\n #line.append(np.float(txt[index_line[-1]:])) \n image[nb_line,:] = line\n nb_line = nb_line+1\n #flipping up-down with [::-1,...] 
then image appears in Python as in the screen in HiPic \n return axex,axey,image[::-1,...]", "def HD_input_snfit_data(self):\n\n dico = cPickle.load(open(SUGAR_parameter_pkl))\n self.read_snfit_results()\n self.read_meta()\n Filtre = np.array([True]*len(self.sn_name))\n self.zcmb = []\n self.z_err = []\n for j in range(len(self.sn_name)):\n if self.sn_name[j] in dico.keys() and self.sn_name[j] :\n\n for i in range (len(self.meta_sn_name_list)):\n if self.sn_name[j] == self.meta_sn_name_list[i]:\n \n self.z_err.append(self.meta_zhl_err[i])\n self.zcmb.append(self.meta_zcmb[i])\n if np.abs(self.x1[j] - self.meta_x1[i]) > 0.001:\n print 'problem with %s include in sample but difference between snfit and meta'%(self.sn_name[j])\n else:\n Filtre[j] = False\n\n for p in dico.keys():\n if p not in self.sn_name:\n print p\n \n self.x1 = self.x1[Filtre]\n self.x1_err = self.x1_err[Filtre] \n self.c = self.c[Filtre]\n self.c_err = self.c_err[Filtre]\n self.mb = self.mb[Filtre]\n self.mb_err = self.mb_err[Filtre]\n self.cov_x0_x1 = self.cov_x0_x1[Filtre]\n self.cov_x0_c = self.cov_x0_c[Filtre]\n self.cov_x1_c = self.cov_x1_c[Filtre]\n self.cov_mb_x1 = self.cov_mb_x1[Filtre]\n self.cov_mb_c = self.cov_mb_c[Filtre]\n self.z = self.z[Filtre]\n self.zcmb = np.array(self.zcmb)\n self.z_err = np.array(self.z_err)\n\n self.cov_y = np.zeros((len(self.mb)*3,len(self.mb)*3))\n\n for i in range (len(self.mb)):\n self.cov_y[i*3,i*3] = self.mb_err[i]**2\n self.cov_y[i*3+ 1,i*3+ 1] = self.x1_err[i]**2\n \n self.cov_y[i*3+ 2,i*3+ 2] = self.c_err[i]**2\n self.cov_y[i*3+ 0,i*3+ 1] = self.cov_mb_x1[i]\n self.cov_y[i*3+ 1,i*3+ 0] = self.cov_mb_x1[i]\n self.cov_y[i*3+ 0,i*3+ 2] = self.cov_mb_c[i]\n self.cov_y[i*3+ 2,i*3+ 0] = self.cov_mb_c[i]\n self.cov_y[i*3+ 1,i*3+ 2] = self.cov_x1_c[i] \n self.cov_y[i*3+ 2,i*3+ 1] = self.cov_x1_c[i] \n \n self.salt_parm = np.array([self.mb,self.x1,self.c]).T\n# print len(self.salt_parm), len(self.cov_y), len(self.z), len(self.zcmb)\n# return self.salt_parm, self.cov_y, self.z, self.meta_zcmb, self.meta_zhl_err, self.sn_name, self.meta_idr\n return self.salt_parm, self.cov_y, self.z, self.zcmb, self.z_err", "def _electron_multiplier(self, hdr):\n d = {}\n d['em yield'], d['em background'], d['em deadtime'] = \\\n unpack(self._bo + 'd 2i', hdr.read(16))\n return d", "def find_cea_coord(header,phi_c,lambda_c,nx,ny,dx,dy):\n nx = int(nx)\n ny = int(ny)\n\n # Array of CEA coords\n x = []\n y = []\n\n for j in range(ny):\n col = []\n row = []\n for i in range(nx):\n col.append(np.radians((i-(nx-1)/2)*dx))\n row.append(np.radians((j-(ny-1)/2)*dy))\n x.append(col)\n y.append(row)\n\n x = np.array(x)\n y = np.array(y)\n\n # Relevant header values\n rSun = header['rsun_obs']/header['cdelt1'] #solar radius in pixels\n disk_latc = np.radians(header['CRLT_OBS'])\n disk_lonc = np.radians(header['CRLN_OBS'])\n disk_xc = header['CRPIX1'] - 1 #disk center wrt lower left of patch\n disk_yc = header['CRPIX2'] - 1\n pa = np.radians(header['CROTA2']*-1)\n\n latc = np.radians(lambda_c)\n lonc = np.radians(phi_c) - disk_lonc\n\n # Convert coordinates\n lat = []\n lon = []\n xi = []\n eta = []\n\n for j in range(ny):\n lat_col = []\n lon_col = []\n xi_col = []\n eta_col = []\n for i in range(nx):\n lat0,lon0 = plane2sphere(x[j,i],y[j,i],latc,lonc)\n lat_col.append(lat0)\n lon_col.append(lon0)\n\n xi0,eta0 = sphere2img(lat0,lon0,disk_latc,0.0,disk_xc,disk_yc,rSun,pa)\n xi_col.append(xi0)\n eta_col.append(eta0)\n lat.append(lat_col)\n lon.append(lon_col)\n xi.append(xi_col)\n eta.append(eta_col)\n\n lat = 
np.array(lat)\n lon = np.array(lon)\n xi = np.array(xi)\n eta = np.array(eta)\n\n return xi,eta,lat,lon", "def model_4_parameters(num_features, num_classes, image_info):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n if image_info['key'][:5] == \"pavia\":\n parameters['C'] = 1.0\n else:\n parameters['C'] = 40.0\n \n return parameters", "def extract_features(image):\n\n return maximum_curvature(*preprocess(image), sigma = 3)", "def parse(self, calibration_px=1.0):\n self.isParsingNeeded = False\n self.meta_data = {}\n self.data = []\n #CZI files\n if self.extend == '.czi':\n with czifile.CziFile(self.file_path) as czi:\n data = czi.asarray()\n Header_Metadata = str(czi).split('<ImageDocument>')\n string = '<ImageDocument>'+Header_Metadata[1]\n #print(string.strip(\"'\"))\n metadata = XMLET.fromstring(string.strip(\"'\"))\n try:\n #Query XML fore the metadata for picture shape(X;Y;Z-stacks).\n #Picture Shape.\n shapes = metadata.findall('./Metadata/Information/Image')[0]\n self.meta_data[\"ShapeSizeX\"] = int(shapes.findall('SizeX')[0].text)\n self.meta_data[\"ShapeSizeY\"] = int(shapes.findall('SizeY')[0].text)\n try:\n self.meta_data[\"ShapeSizeZ\"] = int(shapes.findall('SizeZ')[0].text)\n except:\n self.meta_data[\"ShapeSizeZ\"] = 1\n #Get the hyperstack dimension if the image is a hyperstack.\n try:\n self.meta_data[\"ShapeSizeC\"] = int(shapes.findall('SizeC')[0].text)\n except:\n self.meta_data[\"ShapeSizeC\"] = 1\n print(\"No info of color channels 1 assumed\")\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n PixelSizes = metadata.findall('./Metadata/Scaling/Items/Distance')\n self.meta_data['SizeX'] = float(PixelSizes[0].findall('Value')[0].text)*10**6\n self.meta_data['SizeY'] = float(PixelSizes[1].findall('Value')[0].text)*10**6\n self.meta_data['SizeZ'] = float(PixelSizes[2].findall('Value')[0].text)*10**6\n except(ValueError):\n print (\"Metadata fail\")\n\n #Tiff files.\n #Tiff files are problematic because they most likely wont contain the necessary metadata.\n #Try to get the shape info over common dimensions.\n elif self.extend == '.tif':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray()\n for shape in data.shape:\n if shape <5:\n self.meta_data[\"ShapeSizeC\"] = shape\n elif shape <40:\n self.meta_data[\"ShapeSizeZ\"] = shape\n else:\n self.meta_data[\"ShapeSizeY\"] = shape\n self.meta_data[\"ShapeSizeX\"] = shape\n\n #Read Lsm Files.\n elif self.extend == '.lsm':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray(memmap=True)\n headerMetadata = str(tif.pages[0].cz_lsm_scan_info)\n metadataList = headerMetadata.split(\"\\n*\")\n #Get image shape from lsm header SizeC=0 if not given.\n for shapes in metadataList:\n if \"images_height\" in shapes:\n self.meta_data[\"ShapeSizeX\"]= int(shapes.split()[-1])\n if \"images_width\" in shapes:\n self.meta_data[\"ShapeSizeY\"]= int(shapes.split()[-1])\n if \"images_number_planes\" in shapes:\n self.meta_data[\"ShapeSizeZ\"]= int(shapes.split()[-1])\n if \"images_number_channels\" in shapes:\n self.meta_data[\"ShapeSizeC\"]= int(shapes.split()[-1])\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n data = np.swapaxes(data,1,2)\n lsm_header = str(tif.pages[0].tags.cz_lsm_info)\n LsmInfo = lsm_header.split(\", \")\n i = 0\n #Query for pixel size.\n for element in LsmInfo:\n if \"e-0\" in element:\n i += 1\n if i == 1:\n self.meta_data['SizeX'] = (float(element)*10**6)\n if i == 2:\n self.meta_data['SizeY'] = 
(float(element)*10**6)\n if i == 3:\n self.meta_data['SizeZ'] = (float(element)*10**6)\n\n elif self.extend == \".png\":\n data = misc.imread(self.file_path)\n data = np.expand_dims(np.expand_dims(data[...,0],0),0)\n self.meta_data[\"ShapeSizeC\"] = 1\n self.meta_data[\"ShapeSizeZ\"] = 1\n self.meta_data[\"ShapeSizeX\"] = data.shape[2]\n self.meta_data[\"ShapeSizeY\"] = data.shape[3]\n self.meta_data[\"SizeZ\"] = 1\n self.meta_data[\"SizeX\"] = 0.01\n self.meta_data[\"SizeY\"] = 0.01\n #Bring all formats in the same shape.\n self.data = np.reshape(data,(self.meta_data[\"ShapeSizeC\"],self.meta_data[\"ShapeSizeZ\"],self.meta_data[\"ShapeSizeX\"],self.meta_data[\"ShapeSizeY\"]))\n self.meta_data['ChannelNum'] = self.meta_data[\"ShapeSizeC\"]\n #Set pixel size to manuell value if there are no metadata.\n if self.meta_data == {}:\n self.set_calibration(calibration_px)\n #Set the box for manuel calibration to the actuell pixel size.", "def edf_read(file_name, verbose=False):\n header_values = edf_info(file_name, verbose=verbose)\n f = open(file_name, 'r')\n data_type = esrf_to_numpy_datatype(header_values['DataType'])\n if verbose:\n print(header_values['DataType'], data_type)\n # get the payload size\n payload_size = int(header_values['Size'].split('.')[0])\n # get the image size from the ascii header\n dim_1 = int(header_values['Dim_1'].split('.')[0])\n try:\n dim_2 = int(header_values['Dim_2'].split('.')[0])\n except KeyError:\n if verbose:\n print('Dim_2 not defined in header')\n dim_2 = None\n try:\n dim_3 = int(header_values['Dim_3'].split('.')[0])\n except KeyError:\n if verbose:\n print('Dim_3 not defined in header')\n dim_3 = None\n # now read binary data\n header_size = os.path.getsize(file_name) - payload_size\n f.seek(header_size)\n payload = np.fromfile(f, dtype=data_type)\n if dim_1 and dim_2 and dim_3:\n data = np.reshape(payload, (dim_3, dim_2, dim_1)).transpose(2, 1, 0)\n elif dim_1 and dim_2:\n data = np.reshape(payload, (dim_2, dim_1)).transpose(1, 0)\n else:\n data = np.reshape(payload, (dim_1))\n f.close()\n # pay attention to byte order\n if header_values['ByteOrder'] == 'HighByteFirst':\n data = data.byteswap()\n return data", "def test_crtf_header():\n crtf_str = ('#CRTFv0 CASA Region Text Format version 0\\n'\n 'circle[[42deg, 43deg], 3deg], coord=J2000, color=green')\n reg = Regions.parse(crtf_str, format='crtf')[0]\n assert isinstance(reg, CircleSkyRegion)\n assert reg.center.ra.value == 42.0\n assert reg.center.ra.unit == 'deg'\n assert reg.center.dec.value == 43.0\n assert reg.center.dec.unit == 'deg'\n assert reg.radius.value == 3.0\n assert reg.radius.unit == 'deg'", "def all_feature_extractor(imgpath):\r\n\r\n image = cv2.imread(imgpath)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n # Extracting Gabor Features\r\n feature_dict = gabor_feature_extractor(image)\r\n\r\n feature_dict['Original'] = image\r\n\r\n entropy_img = entropy(image, disk(1))\r\n feature_dict['Entropy'] = entropy_img\r\n\r\n gaussian3_img = nd.gaussian_filter(image, sigma=3)\r\n feature_dict['Gaussian3'] = gaussian3_img\r\n\r\n gaussian7_img = nd.gaussian_filter(image, sigma=7)\r\n feature_dict['Gaussian7'] = gaussian7_img\r\n\r\n sobel_img = sobel(image)\r\n feature_dict['Sobel'] = sobel_img\r\n\r\n canny_edge_img = cv2.Canny(image, 100, 200)\r\n feature_dict['Canny'] = canny_edge_img\r\n\r\n robert_edge_img = roberts(image)\r\n feature_dict['Robert'] = robert_edge_img\r\n\r\n scharr_edge = scharr(image)\r\n feature_dict['Scharr'] = scharr_edge\r\n\r\n prewitt_edge = 
prewitt(image)\r\n feature_dict['Prewitt'] = prewitt_edge\r\n\r\n median_img = nd.median_filter(image, size=3)\r\n feature_dict['Median'] = median_img\r\n\r\n variance_img = nd.generic_filter(image, np.var, size=3)\r\n feature_dict['Variance'] = variance_img\r\n\r\n return feature_dict", "def _detectors1(self, hdr):\n d = {}\n d['FCs'] = self._exit_slits(hdr)\n\n for n in range(1, 6):\n det = 'Detector {}'.format(n)\n d[det] = self._exit_slits(hdr)\n\n d['LD'] = {}\n d['LD']['exit slit width'], d['LD']['exit slit coeff a'], \\\n d['LD']['exit slit coeff b'], d['E0S'], \\\n d['pressure multicollection chamber'], \\\n d['FCs']['fc background setup positive'], \\\n d['FCs']['fc background setup negative'] = \\\n unpack(self._bo + '4d 32s 2i', hdr.read(72))\n\n d['pressure multicollection chamber'] = \\\n self._cleanup_string(d['pressure multicollection chamber'])\n\n for n in range(1, 6):\n det = 'Detector {}'.format(n)\n d[det].update(self._electron_multiplier(hdr))\n\n d['LD'].update(self._electron_multiplier(hdr))\n\n d['EMBig'] = self._exit_slits(hdr)\n d['EMBig'].update(self._electron_multiplier(hdr))\n\n # 8 bytes unused\n hdr.seek(8, 1)\n return d", "def CalcAtmTransmissionForImage(img, header='', chanInfo='', airmass=1.5,pwv=-1, \n spectralaxis=-1, \n value='transmission', P=-1, H=-1, \n T=-1, altitude=-1):\n if (header == ''):\n print \"imhead\", # the comma prevents the newline so that ...10...20 will be on same line\n header = imhead(img,mode='list')\n if (type(header) != dict):\n # Input was a spectrum rather than an image\n if (chanInfo[1] < 60e9):\n telescopeName = 'ALMA'\n else:\n telescopeName = 'VLA'\n else:\n telescopeName = header['telescope']\n # this will not match up with the plot, which uses numberOfChannelsInCube\n# freqs = getFreqsForImage(img, header, spectralaxis)\n freqs = np.linspace(chanInfo[1]*1e-9,chanInfo[2]*1e-9,chanInfo[0])\n# print \"freqs: %f-%f\" % (freqs[0], freqs[-1])\n numchan = len(freqs)\n lsrkwidth = (chanInfo[2] - chanInfo[1])/(numchan-1)\n result = cubeLSRKToTopo(img, nchan=numchan, f0=chanInfo[1], f1=chanInfo[2], chanwidth=lsrkwidth)\n if (result is None):\n topofreqs = freqs\n else:\n topoWidth = (result[1]-result[0])/(numchan-1)\n topofreqs = np.linspace(result[0], result[1], chanInfo[0]) * 1e-9\n casalogPost(\"Converted LSRK range (%f-%f) to TOPO (%f-%f) over %d channels\" % (chanInfo[1]*1e-9, chanInfo[2]*1e-9,topofreqs[0],topofreqs[-1],numchan))\n P0 = 1000.0 # mbar\n H0 = 20.0 # percent\n T0 = 273.0 # Kelvin\n if (telescopeName.find('ALMA') >= 0 or telescopeName.find('ACA') >= 0):\n pwv0 = 1.0 \n P0 = 563.0\n H0 = 20.0\n T0 = 273.0\n altitude0 = 5059\n elif (telescopeName.find('VLA') >= 0):\n P0 = 786.0\n pwv0 = 5.0 \n altitude0 = 2124\n else:\n pwv0 = 10.0 \n altitude0 = 0\n if (pwv < 0):\n pwv = pwv0\n if (T < 0):\n T = T0\n if (H < 0):\n H = H0\n if (P < 0):\n P = P0\n if (altitude < 0):\n altitude = altitude0\n tropical = 1\n midLatitudeSummer = 2\n midLatitudeWinter = 3\n# print \"image bandwidth = %f GHz\" % (np.max(freqs)-np.min(freqs))\n reffreq = np.mean(topofreqs)\n numchanModel = numchan*1\n chansepModel = (topofreqs[-1]-topofreqs[0])/(numchanModel-1)\n# print \"regridded bandwidth=%f GHz, chansep=%f, reffreq=%f\" % (np.max(topofreqs)-np.min(topofreqs), chansepModel, reffreq)\n nbands = 1\n myqa = createCasaTool(qatool)\n fCenter = create_casa_quantity(myqa, reffreq, 'GHz')\n fResolution = create_casa_quantity(myqa, chansepModel, 'GHz')\n fWidth = create_casa_quantity(myqa, numchanModel*chansepModel, 'GHz')\n myat = 
casac.atmosphere()\n myat.initAtmProfile(humidity=H, temperature=create_casa_quantity(myqa,T,\"K\"),\n altitude=create_casa_quantity(myqa,altitude,\"m\"),\n pressure=create_casa_quantity(myqa,P,'mbar'),atmType=midLatitudeWinter)\n myat.initSpectralWindow(nbands, fCenter, fWidth, fResolution)\n myat.setUserWH2O(create_casa_quantity(myqa, pwv, 'mm'))\n# myat.setAirMass() # This does not affect the opacity, but it does effect TebbSky, so do it manually.\n myqa.done()\n\n dry = np.array(myat.getDryOpacitySpec(0)[1])\n wet = np.array(myat.getWetOpacitySpec(0)[1]['value'])\n TebbSky = myat.getTebbSkySpec(spwid=0)[1]['value']\n # readback the values to be sure they got set\n \n rf = myat.getRefFreq()['value']\n cs = myat.getChanSep()['value']\n if (myat.getRefFreq()['unit'] != 'GHz'):\n casalogPost(\"There is a unit mismatch for refFreq in the atm code.\")\n if (myat.getChanSep()['unit'] != 'MHz'):\n casalogPost(\"There is a unit mismatch for chanSep in the atm code.\")\n numchanModel = myat.getNumChan()\n freq0 = myat.getChanFreq(0)['value']\n freq1 = myat.getChanFreq(numchanModel-1)['value']\n# print \"atm returned bandwidth = %f GHz = %f to %f \" % (freq1-freq0, freq0, freq1)\n newfreqs = np.linspace(freqs[0], freqs[-1], numchanModel) # fix for SCOPS-4815\n# print \"freqs: %f-%f newfreqs: %f-%f\" % (freqs[0], freqs[-1], newfreqs[0], newfreqs[-1])\n transmission = np.exp(-airmass*(wet+dry))\n TebbSky *= (1-np.exp(-airmass*(wet+dry)))/(1-np.exp(-wet-dry))\n if value=='transmission':\n values = transmission\n else:\n values = TebbSky\n del myat\n return(newfreqs, values)", "def __call__(cls, nir_paw_image_fname, nir_paw_conf_fname, output_template, conf_limit):\n\n# on with the show\n logger.info('Opening science and confidence frames')\n ifits=fitsio.FITS(nir_paw_image_fname,'r')\n cfits=fitsio.FITS(nir_paw_conf_fname,'r')\n\n#\n# Check that the number of HDUs match\n#\n\n if (len(ifits) != len(cfits)):\n print(\"Number of HDUs/extensions in IMAGE and CONFidence files do not match.\")\n print(\"Aborting\")\n exit(1)\n\n p_ih=ifits[0].read_header()\n p_ch=cfits[0].read_header()\n# Remove reserve keywords\n p_ih.clean()\n\n#\n# Extract some keywords from PRIMARY header to propagate into the individual images.\n#\n base_dict={}\n base_header=[]\n for hkeep in nci.nir_paw_primary_keep:\n if (hkeep in p_ih):\n base_header.append({'name':hkeep,'value':p_ih[hkeep],'comment':p_ih.get_comment(hkeep)})\n base_dict[hkeep]={'value':p_ih[hkeep],'comment':p_ih.get_comment(hkeep)}\n else:\n print(\"Keyword {:s} missing in HDU[{:d}]\".format(hkeep,0))\n#\n# If possible, need too keep track of REQTIME (requested frametime) because sometimes \n# EXPTIME seems to be mispopulated in the CCD image HDUs with TEXPTIME\n#\n if ('TEXPTIME' in p_ih):\n texptime=p_ih['TEXPTIME']\n else:\n texptime=None\n if ('REQTIME' in p_ih):\n reqtime=p_ih['REQTIME']\n else:\n reqtime=None\n#\n# print(base_header)\n \n\n#\n# Step through HDUs... and form \"CCD\" images for each HDU\n#\n ExtList=[]\n for hnum in range(1,len(ifits)):\n print(\"############ Begin work on extnum={:d} ###############\".format(hnum))\n\n# Check that extensions match (after that concentrate on image).\n print(hnum,ifits[hnum].get_extname(),cfits[hnum].get_extname())\n if (ifits[hnum].get_extname() != cfits[hnum].get_extname()):\n print(\"Working on extension {:d}. 
Extension names (image,conf) of ([{:s}],[{:s}]) do not match!\".format(\n hnum,ifits[hnum].get_extname(),cfits[hnum].get_extname()))\n print(\"Aborting!\")\n exit(1)\n\n f_ih=ifits[hnum].read_header()\n f_ih.clean()\n#\n# Fix occurences where the CCD-level keyword EXPTIME has inherited the value of TEXPTIME\n#\n exptime=f_ih['EXPTIME']\n if (reqtime is not None):\n if (exptime > reqtime):\n print(\"Warning: possible corrupt EXPTIME (total exptime rather than frame time present).\")\n print(\"Attempting to update EXTIME to REQTIME (requested frame time).\")\n print(\" Primary HDU: TEXPTIME: {:}\".format(texptime))\n print(\" Primary HDU: REQTIME: {:}\".format(reqtime))\n print(\" Current HDU: EXPTIME: {:} --> {:}\".format(exptime,reqtime))\n exptime=reqtime\n f_ih['EXPTIME']=reqtime\n#\n# Augment keywords pulled from primary header with keywords from current HDU\n#\n c_header=base_header[:]\n c_dict=dict(base_dict)\n for hkeep in nci.nir_paw_hdu_keep:\n if (hkeep in f_ih):\n# print(hkeep,f_ih[hkeep],f_ih.get_comment(hkeep))\n c_header.append({'name':hkeep,'value':f_ih[hkeep],'comment':f_ih.get_comment(hkeep)})\n if (hkeep in c_dict):\n print(\"Warning: Replacing keyword {:s} with value from hdu={:d}\".format(hkeep,hnum))\n c_dict[hkeep]={'value':f_ih[hkeep],'comment':f_ih.get_comment(hkeep)}\n else:\n print(\"Keyword {:s} missing in HDU[{:d}]\".format(hkeep,hnum))\n\n#\n# Get the CCDNUM from special keyword and propagate\n# Get SKYLEVEL, SKYNOISE, ZEROPOINT and form basis value for the weight plane\n#\n ccdnum=f_ih['HIERARCH ESO DET CHIP NO']\n c_header.append({'name':'CCDNUM','value':ccdnum,'comment':'Unique Detector Number'})\n\n# exptime=f_ih['EXPTIME']\n## Fix occurences where the CCD-level keyword EXPTIME has inherited the value of TEXPTIME\n# if (exptime > reqtime):\n# print(\"Warning: possible corrupt EXPTIME (total exptime rather than frame time present).\")\n# print(\"Attempting to update EXTIME to REQTIME (requested frame time).\")\n# print(\" Primary HDU: TEXPTIME: {:.2f}\".format(texptime))\n# print(\" Primary HDU: REQTIME: {:.2f}\".format(reqtime))\n# print(\" Current HDU: EXPTIME: {:.2f} --> {:.2f}\".format(exptime,reqtime))\n# exptime=reqtime\n# f_ih['EXPTIME']=reqtime\n\n mtime=2.5*np.log10(exptime)\n skylev=f_ih['SKYLEVEL']\n skyrms=f_ih['SKYNOISE']\n seeing=f_ih['SEEING']\n magzpt=f_ih['MAGZPT']\n\n# zeropoint include a correction from VEGA->AB\n# zeropoint in headers was found to have a factor for EXPTIME removed (have to add back in for DES-like processing)\n\n if (p_ih['BAND'] in nci.nir_vega_to_ab):\n magzpt=magzpt+nci.nir_vega_to_ab[p_ih['BAND']]+mtime\n else:\n print(\"Warning! 
Unknown BAND ({:s}) for conversion of zeropoint from VEGA to AB system\".format(p_ih['BAND']))\n\n c_header.append({'name':'SKYBRITE', 'value':skylev, 'comment':'Sky level estimate from IMCORE'})\n c_header.append({'name':'SKYSIGMA', 'value':skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'SKYVARA', 'value':skyrms*skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'SKYVARB', 'value':skyrms*skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'FWHM', 'value':seeing, 'comment':'Average FWHM (pixels)'})\n c_header.append({'name':'MAG_ZERO', 'value':magzpt, 'comment':'Converted MAGZPT(Vega) to AB system'})\n nite_val=convert_utc_str_to_nite(f_ih['DATE-OBS'])\n c_header.append({'name':'NITE', 'value':nite_val, 'comment':'Observation Nite'})\n c_header.append({'name':'SATURATE', 'value':nci.nircam_satval[ccdnum], 'comment': 'Saturation Level (ADU)'})\n c_header.append({'name':'PIXSCAL1', 'value':0.341, 'comment': 'Fiducial pixel scale (arcsec/pix)'})\n c_header.append({'name':'PIXSCAL2', 'value':0.341, 'comment': 'Fiducial pixel scale (arcsec/pix)'})\n\n# bval=f_ih['BSCALE']\n# print(\"BSCALE was: \",bval)\n print(\"SKYLEVEL was: \",skylev)\n print(\"SKYRMS was: \",skyrms)\n#\n# Searching for a proper WGT prescription\n#\n# This was what I took to be equivalent to DES (but perhaps it does not properly factor in N-image stack\n# wgtval=skylev+(skyrms*skyrms)\n print(\"SKYLEV + (SKYRMS*SKYRMS): \",skylev+(skyrms*skyrms))\n#\n# This was assuming SKYLEVEL does not properly inform stats\n# wgtval=(skyrms*skyrms)\n print(\"(SKYRMS*SKYRMS): \",skyrms*skyrms)\n\n#\n# Read the image data from the science and confidence files.\n#\n sci_data=ifits[hnum].read()\n print(\"Median of data {:.3f} \".format(np.median(sci_data)))\n conf_data=cfits[hnum].read()\n\n#\n# Better seemed to be a re-measurement of STD\n#\n print(\"Attempting an improved SKYRMS with 3-sigma clip to remove objects\")\n avgval, medval, stdval = medclip(sci_data,verbose=3)\n# print(avgval,medval,stdval)\n print(\"stdval^2: \",stdval*stdval)\n wgtval=(stdval*stdval)\n# print(wgtval)\n#\n# Use the new (i.e. 
chip-based header) to feed a WCS \n# Use image size to feed calculations for center and corners (similar to despyastro.CCD_corners\n#\n print(\"Calculating center/corners assuuming native ZPN projection\")\n w=WCS(fitsio.FITSHDR(c_header))\n\n fnax2=float(sci_data.shape[0])\n fnax1=float(sci_data.shape[1])\n corn_x=np.array([fnax1/2.0,1.,fnax1,fnax1,1.])\n corn_y=np.array([fnax2/2.0,1.,1.,fnax2,fnax2])\n sky = w.pixel_to_world(corn_x,corn_y)\n corn_ra=sky.ra.degree\n corn_dec=sky.dec.degree\n\n c_header.append({'name':'RA_CENT', 'value':corn_ra[0], 'comment':'RA center'})\n c_header.append({'name':'DEC_CENT','value':corn_dec[0],'comment':'DEC center'})\n for i in range(1,5):\n c_header.append({'name':'RAC{:d}'.format(i), 'value':corn_ra[i], 'comment':'RA corner {:d}'.format(i)})\n c_header.append({'name':'DECC{:d}'.format(i),'value':corn_dec[i],'comment':'DEC corner {:d}'.format(i)})\n RACMIN, RACMAX, DECCMIN, DECCMAX, CROSSRA0 = get_DESDM_corners_extent(corn_ra, corn_dec)\n c_header.append({'name':'RACMIN', 'value':RACMIN, 'comment':'Minimum extent of image in RA'})\n c_header.append({'name':'RACMAX', 'value':RACMAX, 'comment':'Maximum extent of image in RA'})\n c_header.append({'name':'DECCMIN', 'value':DECCMIN, 'comment':'Minimum extent of image in Declination'})\n c_header.append({'name':'DECCMAX', 'value':DECCMAX, 'comment':'Maximum extent of image in Declination'})\n c_header.append({'name':'CROSSRA0','value':CROSSRA0,'comment':'Does Image Span RA 0h (Y/N)'})\n c_header.append({'name':'DESEPOCH','value':'NIREPOCH','comment':'Default DES epoch definition for including NIR data'})\n#\n#\n#\n print(\"Stripping ZPN projection from WCS and creating a shift to get a rough TAN\")\n recs_to_delete=[] \n for i, hrec in enumerate(c_header):\n if (hrec['name'] == 'CTYPE1'):\n c_header[i]['value']='RA---TAN'\n if (hrec['name'] == 'CTYPE2'):\n c_header[i]['value']='DEC--TAN'\n\n if (hrec['name'] == 'CRVAL1'):\n c_header[i]['value']=corn_ra[0]\n if (hrec['name'] == 'CRVAL2'):\n c_header[i]['value']=corn_dec[0]\n if (hrec['name'] == 'CRPIX1'):\n c_header[i]['value']=fnax1/2.0\n if (hrec['name'] == 'CRPIX2'):\n c_header[i]['value']=fnax2/2.0\n\n if (hrec['name'] in ['PV2_1','PV2_2','PV2_3','PV2_4','PV2_5']):\n recs_to_delete.append(i)\n if (len(recs_to_delete) > 0):\n for i in sorted(recs_to_delete,reverse=True):\n x=c_header.pop(i)\n print(\"Removing: {:}\".format(x))\n\n whack=WCS(fitsio.FITSHDR(c_header))\n skyhack = whack.pixel_to_world(corn_x,corn_y)\n whack_corn_ra=skyhack.ra.degree\n whack_corn_dec=skyhack.dec.degree\n for i in range(5):\n cosdec=np.cos(corn_dec[i]*np.pi/180.)\n dra=3600.*(corn_ra[i]-whack_corn_ra[i])*cosdec\n ddec=3600.*(corn_dec[i]-whack_corn_dec[i])\n print(\" WCS shift {:d} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} \".format(ccdnum,corn_ra[i],corn_dec[i],whack_corn_ra[i],whack_corn_dec[i],dra,ddec))\n\n# for i, hrec in enumerate(c_header):\n# print(i,hrec)\n\n#\n# Form the SCI, MSK, and WGT HDUs\n#\n im=DESImage(init_data=True,init_mask=True,init_weight=True,shape=sci_data.shape)\n\n im.data=np.float32(sci_data)\n msk_wsm=np.where(conf_data<conf_limit)\n im.mask[msk_wsm] |= BADPIX_BPM\n im.weight=np.float32(conf_data/100./wgtval)\n#\n# Check for extra conditions where further masking is needed\n# Here is where CCD=6 check was started (now removed and placed \n# in nir_starmask to take advantage of bright object masking\n#\n\n\n#\n# Deal with individual header-isms and write out SCI, MSK, WGT\n# Note this is using fitsio (duplicating some of the DESIMAGE.save \n# but 
customization was needed to deal with foibles of the current\n#\n fname=re.sub('%02d','{:02d}'.format(ccdnum),output_template,1)\n ofits = fitsio.FITS(fname, 'rw', clobber=True)\n\n im.header=fitsio.FITSHDR(c_header) \n im.header['DES_EXT']='IMAGE'\n im.header = update_hdr_compression(im.header, 'SCI')\n ofits.write(im.data,extname='SCI',header=im.header)\n\n\n im.mask_hdr=fitsio.FITSHDR(c_header) \n im.mask_hdr['DES_EXT']='MASK'\n im.mask_hdr = update_hdr_compression(im.mask_hdr, 'MSK')\n im.mask_hdr['DES_EXT']='MASK'\n ofits.write(im.mask,extname='MSK',header=im.mask_hdr)\n\n# im.weight_hdr=fitsio.FITSHDR(c_header) \n# print(im.weight_hdr)\n im.weight_hdr = update_hdr_compression(im.weight_hdr, 'WGT')\n# print(im.weight_hdr)\n im.weight_hdr['DES_EXT']='WEIGHT'\n ofits.write(im.weight,extname='WGT',header=im.weight_hdr)\n\n ofits.close()\n print(\"Wrote {:s}\".format(fname))\n print(\" \")\n \n\n ifits.close()\n cfits.close()\n\n ret_code = 0\n return ret_code", "def get_FitsHeader(fitsimage):\n hdu = getheader(fitsimage)\n bmaj = round(float(hdu['BMAJ'])*3600., 2) #arcsec\n bmin = round(float(hdu['BMIN'])*3600., 2) #arcsec\n\n return bmaj, bmin", "def read_input():\n \n argv = sys.argv\n\n # Read file names from sd input\n f_dy = argv[1] # matdyn.modes\n f_pat = argv[2] # path.out (should be in crystal coords)\n f_ph = argv[3] # ph.x output (Gamma point)\n\n # Read input card\n f_inp = open(\"input.dat\",'r')\n l1 = f_inp.readline()\n l2 = f_inp.readline()\n l3 = f_inp.readline().split()\n f_inp.close()\n\n # Open files\n\n f = open(f_dy,'r') # matdyn.modes \n f_dyn = f.readlines()\n f.close()\n\n f = open(f_pat,'r') # path.out\n f_path = f.readlines()\n f.close()\n\n f = open(f_ph,'r') # ph.x output\n f_zs = f.readlines()\n f.close()\n\n # Assign values to a0, nat, M, nqp\n a0, vol = float(l1.split()[0]), float(l1.split()[1])\n nat = int(l2) \n mass = np.zeros(nat)\n for iat in range(nat):\n mass[iat] = float(l3[iat])\n\n # Assign values to G (reciprocal lattice vec)\n ig = 0 ; i = 0\n for line in f_zs:\n if \"reciprocal axes:\" in line:\n ig = i + 1 \n break\n i += 1 \n\n rG = np.zeros((3,3))\n for ic in range(3):\n rGtext = f_zs[ig+ic][23:48].split()\n rG[ic,:] = np.array([float(rGtext[0]), float(rGtext[1]), float(rGtext[2])])\n\n # Read Z* tensor from f_zs\n i = 0\n iz = 0\n zstart = []\n for line in f_zs:\n if \"(d P / du)\" in line:\n iz = i + 3\n if \"Px\" in line:\n zstart.append(i)\n\n i += 1\n\n # Read the dielectric tensor from f_zs\n i = 0\n ie = 0\n for line in f_zs:\n if \"Dielectric constant in cartesian axis\" in line:\n ie = i + 2\n break\n\n i += 1\n\n # Assign Z* values\n zs = np.zeros((nat,3,3)) # initialize Z*\n\n for iat in range(nat):\n for ic in range(3):\n ztext = f_zs[zstart[iat]+ic][19:56].split()\n for jc in range(3):\n zs[iat][ic][jc] = float(ztext[jc])\n\n # Assing the dielectric tensor\n eps = np.zeros((3,3))\n\n for ic in range(3):\n epstext = f_zs[ie+ic][16:66].split()\n for jc in range(3):\n eps[ic][jc] = float(epstext[jc])\n\n # Number of modes and q-points\n nmodes = 3 * nat\n nqpt = int(f_path[0].split()[0])\n\n # Read the q-points\n q = np.zeros((nqpt,4)) # 4th dimension is lenght for q-points on a line, weights for q-points on a grid \n for iq in range(1,nqpt+1):\n q[iq-1,] = np.array([float(f_path[iq].split()[0]),float(f_path[iq].split()[1]), \\\n float(f_path[iq].split()[2]),float(f_path[iq].split()[3])])\n\n # Read the eigenvalues(om) and eigenvectors(eig) \n # Initiate first\n om = np.zeros((nmodes,nqpt))\n eig = np.zeros((nmodes,nqpt,nat,3), 
dtype=complex) \n\n # Get the starting lines for each q-pt\n i = 0\n i_q = []\n for line in f_dyn:\n if \"q =\" in line:\n i_q.append(i+2)\n i += 1\n\n #Assign values to om and eig\n for iq in range(nqpt):\n for imod in range(nmodes):\n omtext = f_dyn[i_q[iq]+imod*(nat+1)][43:55]\n om[imod][iq] = float(omtext)\n for iat in range(nat):\n etext = f_dyn[i_q[iq]+imod*(nat+1)+iat+1][2:72].split()\n for ic in range(3):\n eig.real[imod][iq][iat][ic]=float(etext[2*ic])*np.sqrt(mass[iat])\n eig.imag[imod][iq][iat][ic]=float(etext[2*ic+1])*np.sqrt(mass[iat])\n\n #Normalize the eigenvectors\n t1 = eig[imod,iq,:,:]\n t_nu = np.sum(np.sum(np.conjugate(t1)*t1,axis=0))\n eig[imod,iq,:,:] = eig[imod,iq,:,:]/np.sqrt(np.abs(t_nu))\n\n # Check normalization\n delta = np.zeros((nmodes,nmodes), dtype=complex)\n for iat in range(nat):\n for ic in range(3):\n t2 = eig[:,iq,iat,ic]\n delta += np.outer(np.conjugate(t2),t2)\n\n unit = np.diag(np.diag(np.ones((nmodes,nmodes)))) # Unit vector\n test = np.abs( (delta-unit) )\n if ( np.max(test) > 1e-3):\n print \"Non-orthonormal eigenvector at iq=\", q[iq,:]\n\n return om, eig, q, zs, eps, mass, a0, vol, rG, nmodes, nqpt, nat", "def preprocess(image):\n return image - MEAN_PIXEL", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. 
+ 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, 
g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def read_kitti_calib(filename):\n\n with open(filename) as f:\n for line in f:\n data = line.split(' ')\n if data[0] == 'P2:':\n calib_P2 = np.array([float(x) for x in data[1:13]])\n calib_P2 = calib_P2.reshape(3, 4)\n return _extend_matrix(calib_P2)\n\n raise Exception(\n 'Could not find entry for P2 in calib file {}'.format(filename))", "def main(idrun):\n int_type = numpy.int32\n double_type = numpy.float64\n float_type = numpy.float32\n complex_type = numpy.complex64\n\n ns = 7\n iudm = 19; iuv = 12\n dname = numpy.array([\"LONGITUDINAL EFIELD \",\"ELEC CURRENT DENSITY\",\n \"VECTOR POTENTIAL \",\"TRANSVERSE EFIELD \",\n \"MAGNETIC FIELD \",\"RADIATIVE VPOTENTIAL\",\n \"ION CURRENT DENSITY \"],dtype=str)\n\n# create string from idrun\n if (idrun < 0):\n cdrun = \"Unknown\"\n while (cdrun.isdigit() == False):\n cdrun = input(\"enter integer idrun: \")\n idrun = int(cdrun)\n cdrun = str(idrun)\n fname = \"diag3.\" + cdrun\n cmfield3.ffopen3(iudm,fname)\n\n# nscalars = table of available diagnostics\n nscalars = numpy.zeros((ns),int_type,'F')\n\n# determine which vector diagnostics are available\n cmfield3.readvdiags3(iudm,nscalars)\n\n nts = numpy.zeros((1),int_type,'F')\n modesx = 
numpy.zeros((1),int_type,'F')\n modesy = numpy.zeros((1),int_type,'F')\n modesz = numpy.zeros((1),int_type,'F')\n mrec = numpy.zeros((1),int_type,'F')\n fname = numpy.array([\"\"],'S32')\n\n# select diagnostic\n m = numpy.sum(nscalars)\n while True:\n if (m > 0):\n n = -1\n while True:\n if (n < 0):\n for i in range(0,ns):\n if (nscalars[i]==1):\n print (\"enter \", i+1,\" for\", \n numpy.str.rstrip(dname[i]))\n print (\"enter \", 0,\" for EXIT\")\n c = input(\"\")\n if (c.isdigit()):\n n = int(c)\n if (n==0):\n break\n if ((n >= 1) and (n <= ns)):\n if (nscalars[n-1]==0):\n n = -1\n else:\n n = -1\n if (n > 0):\n break\n print (\"invalid entry, try again or enter 0 to quit\")\n else:\n print (\"no vector diagnostic files found\")\n n = 0\n# exit procedure\n if (n==0):\n if (\"vfield\" in globals()):\n vfield = None\n cmfield3.closeff3(iudm)\n return\n\n print (numpy.str.rstrip(dname[n-1]), \" diagnostic selected\")\n\n# return parameters for selected vector diagnostic:\n# nts, modesx, modesy, modesz, nrec, fname\n cmfield3.vdiagparams3(iudm,n,nts,modesx,modesy,modesz,mrec,fname)\n nrec = mrec[0]\n\n# nx/ny/nz = number of global grid points in x/y/z direction\n nx = int(math.pow(2,in3.indx)); ny = int(math.pow(2,in3.indy))\n nz = int(math.pow(2,in3.indz))\n# kyp/kzp = number of real grids in each field partition in y/z\n kyp = int((ny - 1)/in3.nvpy) + 1; kzp = int((nz - 1)/in3.nvpz) + 1\n# kyb/kzb = minimum number of processors in distributed array in y/z\n kyb = int((ny - 1)/kyp) + 1; kzb = int((nz - 1)/kzp) + 1\n# nyv = second dimension of scalar field array, >= ny\n# nzv = third dimension of scalar field array, >= nz\n nyv = kyp*kyb; nzv = kzp*kzb\n\n# allocate vector array\n if (\"vfield\" not in globals()):\n vfield = numpy.empty((in3.ndim,nx,nyv,nzv),float_type,'F')\n dt = in3.dt*float(nts[0])\n\n# open stream file for vector field\n cmfield3.fsopen3(iuv,fname)\n\n# nrec = number of complete records\n nrec = int(nrec/(kyb*kzb))\n print (\"records found: nrec = \", nrec)\n\n# read and transpose vector data\n for ii in range(0,nrec):\n# read real vector field\n cmfield3.freadv3(iuv,vfield,in3.ndim,nx,kyp,kyb,kzp,kzb)\n it = nts[0]*ii\n time = dt*float(ii)\n# show time\n print (\"it,time=\",it,time)\n cmfield3.closeff3(iuv)\n print()", "def get_head_correct_info(raw_filename, epoch_filename, N=-1):\n trans = get_ctf_trans(raw_filename)\n fiducials = get_ref_head_pos(epoch_filename, trans, N=N)\n raw = mne.io.ctf.read_raw_ctf(raw_filename)\n info = replace_fiducials(raw.info, fiducials)\n return trans, fiducials, info", "def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F444W', grism='DFSR'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRCam'\n h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' 
# e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'DFSR':\n h['GRISM'] = 'DFSR', 'Spectral trace along X'\n else:\n h['GRISM'] = 'DFSC', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def PImageT2Spec (inImage, outImage, nTerm, \n inCCVer, outCCVer, err,\n refFreq=1.0e9, terms=None, startCC=1, endCC=0, \n dropNeg=True, dist=None):\n ################################################################\n # Checks\n if not Image.PIsA(inImage):\n raise TypeError,\"inImage MUST be a Python Obit Image\"\n if not Image.PIsA(outImage):\n raise TypeError,\"outImage MUST be a Python Obit Image\"\n if not OErr.OErrIsA(err):\n raise TypeError,\"err MUST be an OErr\"\n\n # Limit on distance\n if dist==None:\n limit = 1.0e20\n else:\n limit = dist\n inImage.List.set(\"Limit\", limit) # Save on info list\n inImage.List.set(\"dropNeg\", dropNeg) \n\n # Update output header\n d = outImage.Desc.Dict\n d['ctype'][2] = 'SPECLOGF'\n d['crval'][2] = refFreq\n outImage.Desc.Dict = d;\n outImage.UpdateDesc(err)\n\n # Merge CCs to temp cc table\n tmpCCver = Image.PGetHighVer(inImage, \"AIPS CC\") + 1;\n inTab = inImage.NewTable(Image.READONLY, \"AIPS CC\", inCCVer, err)\n noParms = inTab.Desc.List.Dict[\"NO_PARMS\"][2][0]\n tmpTab = inImage.NewTable(Image.WRITEONLY, \"AIPS CC\", tmpCCver, err, noParms=noParms)\n TableUtil.PCCMerge(inTab, tmpTab, err)\n # Fix spectrum if needed\n if terms:\n nterm = len(terms)\n Obit.TableCCUtilFixTSpec(inImage.me, tmpCCver, \\\n refFreq, nterm, terms,\n startCC, endCC, err.me)\n if err.isErr:\n OErr.printErrMsg(err, \"Error Adjusting spectrum of CC Table\")\n # Convert\n outImage.me = Obit.ImageUtilT2Spec(inImage.me, outImage.me, nTerm, tmpCCver, \\\n outCCVer, startCC, endCC, err.me)\n if err.isErr:\n OErr.printErrMsg(err, \"Error Converting image/CC Table\")\n # Delete temporary CC table\n inImage.ZapTable(\"AIPS CC\", tmpCCver, err)\n # Do history for spectrum modification\n pgmName = OSystem.PGetPgmName()\n outHistory = History.History(\"history\", outImage.List, err)\n History.POpen(outHistory, History.READWRITE, err)\n History.PTimeStamp(outHistory,\" Start Obit \"+pgmName,err)\n if terms:\n History.PWriteRec(outHistory,-1,pgmName+\" nterm = \"+str(nterm),err)\n History.PWriteRec(outHistory,-1,pgmName+\" refFreq = \"+str(refFreq ),err)\n History.PWriteRec(outHistory,-1,pgmName+\" terms = \"+str(terms),err)\n History.PClose(outHistory, err)", "def extract_data(mdp,lnum):\n if lnum > -1:\n mdp.corr_file.seek(0)\n lfsp = mdp.corr_file.read().split('\\n') # read data\n cdat = []\n ## -- choose input type\n for i in range(lnum,lnum+mdp.corr_len):\n if mdp.input_type == \"real\":\n cdat.append(float(lfsp[i].split('\\t')[1]))\n elif mdp.input_type == \"imag\":\n cdat.append(float(lfsp[i].split('\\t')[2]))\n elif mdp.input_type == \"mod\":\n temp1=float(lfsp[i].split('\\t')[1])\n temp2=float(lfsp[i].split('\\t')[2])\n cdat.append(math.sqrt(temp1*temp1+temp2*temp2))\n if df.do_baryon:\n bpar = [ (np.power(-1,t) if t>=mdp.corr_len/2 else 1) for t in range(mdp.corr_len) ]\n cdat = [x*y for x,y in zip(cdat,bpar)]\n return cdat\n return []", "def read(self, timestamp=None):\n grbs = pygrib.open(self.filename)\n\n grid = self.subgrid\n\n return_img = {}\n return_metadata = {}\n\n var_msg_lut = {p: None for p in self.parameter}\n sea_mask = None\n for N in range(grbs.messages):\n n = N + 1\n message = grbs.message(n)\n param_name = str(message.cfVarNameECMF)\n\n if param_name == \"lsm\":\n if self.mask_seapoints and sea_mask is 
None:\n sea_mask = message.values.flatten()\n\n if param_name not in self.parameter:\n continue\n else:\n var_msg_lut[param_name] = n\n\n # available variables\n shape = None\n for param_name, n in var_msg_lut.items():\n if n is None:\n continue\n\n return_metadata[param_name] = {}\n\n message = grbs.message(n)\n\n param_data = message.values.flatten()\n if not shape:\n shape = param_data.shape\n return_img[param_name] = param_data\n\n if grid is None:\n lats, lons = message.latlons()\n try:\n res_lat, res_lon = get_grid_resolution(lats, lons)\n grid = ERA_RegularImgGrid(res_lat, res_lon)\n except ValueError: # when grid not regular\n lons_gt_180 = np.where(lons > 180.0)\n lons[lons_gt_180] = lons[lons_gt_180] - 360\n grid = ERA_IrregularImgGrid(lons, lats)\n\n return_metadata[param_name][\"units\"] = message[\"units\"]\n return_metadata[param_name][\"long_name\"] = \\\n message[\"parameterName\"]\n\n if \"levels\" in message.keys():\n return_metadata[param_name][\"depth\"] = \"{:} cm\".format(\n message[\"levels\"])\n\n if self.mask_seapoints:\n if sea_mask is None:\n raise IOError(\n \"No land sea mask parameter (lsm) in passed image\"\n \" for masking.\")\n else:\n # mask the loaded data\n for name in return_img.keys():\n param_data = return_img[name]\n param_data = np.ma.array(\n param_data,\n mask=np.logical_not(sea_mask),\n fill_value=np.nan,\n )\n param_data = param_data.filled()\n return_img[name] = param_data\n\n grbs.close()\n\n # missing variables\n for param_name, n in var_msg_lut.items():\n if n is not None:\n continue\n param_data = np.full(shape, np.nan)\n warnings.warn(\"Cannot load variable {var} from file {thefile}. \"\n \"Filling image with NaNs.\".format(\n var=param_name, thefile=self.filename))\n return_img[param_name] = param_data\n return_metadata[param_name] = {}\n return_metadata[param_name][\"long_name\"] = lookup(\n self.product, [param_name]).iloc[0][\"long_name\"]\n\n if self.array_1D:\n return Image(\n grid.activearrlon,\n grid.activearrlat,\n return_img,\n return_metadata,\n timestamp,\n )\n else:\n nlat = np.unique(grid.activearrlat).size\n nlon = np.unique(grid.activearrlon).size\n\n for key in return_img:\n return_img[key] = return_img[key].reshape((nlat, nlon))\n\n return Image(\n grid.activearrlon.reshape(nlat, nlon),\n grid.activearrlat.reshape(nlat, nlon),\n return_img,\n return_metadata,\n timestamp,\n )", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, 
count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, 
count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def get_textimage(fname):\n \tfrom string import atoi,atof\n \tinfile = open(fname)\n\tlines = infile.readlines()\n\tinfile.close()\n\tdata = lines[0].split()\n\tnx = atoi(data[0])\n\tny = atoi(data[1])\n\tnz = atoi(data[2])\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\te.to_zero()\n\tfor line in lines[1:]:\n\t\tdata = line.split()\n\t\tix = atoi(data[0])\n\t\tiy = atoi(data[1])\n\t\tiz = atoi(data[2])\n\t\tval = atof(data[3])\n\t\te[ix,iy,iz] = val\n\treturn e", "def compute_descriptor(img):\n rects = detector(img,1)\n shape = sp68(img,rects[0])\n comp = facerec.compute_face_descriptor(img,shape)\n a = []\n for i in comp: \n a.append(i)\n return a", "def _readcvcfile(self, cvcfilepath):\n _datatype, t_begin = self._parse_cvcfile(cvcfilepath)\n # Get cvc data from file.\n cvc_dtype = self.__get_cvc_dtype()\n print(\"Reading cvcfile: {}\".format(cvcfilepath))\n with open(cvcfilepath, 'rb') as fin:\n datafromfile = numpy.fromfile(fin, dtype=cvc_dtype)\n return datafromfile, t_begin", "def init_from_header(self, hdr, nofn=False):\n\n global filtfn, fmtch, ftypes\n\n self.hdr = hdr\n getwcs = False\n\n try:\n self.target = hdr['OBJECT']\n except KeyError:\n pass\n\n for d in ('DATE-OBS', 'DATE', '_ATE'):\n try:\n self.date = Time(hdr[d]).datetime\n break\n except KeyError:\n pass\n if self.date is None:\n raise RemFitsErr(\"No date found in hheader\")\n\n for t in ('CCDTEMP', 'TEMPCHIP'):\n try:\n self.ccdtemp = hdr[t]\n break\n except KeyError:\n pass\n if self.ccdtemp is None:\n raise RemFitsErr(\"No temperature found in hheader\")\n\n dfmtd = self.date.strftime(\"%d/%m/%Y @ %H:%M:%S\")\n\n try:\n self.filter = hdr['FILTER']\n except KeyError:\n pass\n\n if nofn:\n if self.filter is None:\n raise RemFitsErr(\"No filename and no filter given\")\n self.ftype = \"REMIR file\"\n getwcs = True\n self.description = \"REMIR file dated \" + dfmtd\n try:\n self.endx = self.ncolumns = hdr['NAXIS1']\n self.endy = self.nrows = hdr['NAXIS2']\n except KeyError:\n raise RemFitsErr(\"Dimensions of data not given in FITS header\")\n elif self.filter in remir_types:\n self.description = \"REMIR Image file dated \" + dfmtd\n self.ftype = 'Image'\n self.startx = self.starty = 0\n self.endx = self.endy = 512\n getwcs = True\n else:\n try:\n ifname = hdr['FILENAME']\n except KeyError:\n raise RemFitsErr(\"No internal filename in FITS header\")\n\n mtches = fmtch.match(ifname)\n if mtches is None:\n if self.filter is None:\n raise RemFitsErr(\"No filter 
given and no decipherable filename\")\n try:\n self.ftype = ftypes[ifname[0]][0]\n except KeyError:\n self.ftype = 'Processed image'\n getwcs = True\n else:\n ft, quad = mtches.groups()\n hfilt = filtfn[quad]\n try:\n self.ftype, getwcs = ftypes[ft]\n except KeyError:\n self.ftype = 'Processed image'\n getwcs = True\n if self.filter is None:\n self.filter = hfilt\n elif hfilt != self.filter:\n raise RemFitsErr(\"Conflig on filter types between \" + self.filter + \" and internal filename \" + ifname + \" suggesting \" + hfilt)\n\n self.description = self.ftype + \" dated \" + dfmtd\n\n try:\n self.startx = hdr['startX']\n self.starty = hdr['startY']\n self.endx = hdr['endX']\n self.endy = hdr['endY']\n self.ncolumns = self.endx - self.startx\n self.nrows = self.endy - self.starty\n except KeyError:\n warnings.warn(\"Had to insert geometry\", UserWarning, stacklevel=5)\n self.startx, self.starty, self.ncolumns, self.nrows = remdefaults.get_geom(self.date, self.filter)\n self.endx = self.startx + self.ncolumns\n self.endy = self.starty + self.nrows\n\n if self.startx >= 1024:\n if self.filter not in 'gr':\n raise RemFitsErr(\"Filter \" + self.filter + \" not expected to be on right of CCD\")\n else:\n if self.filter not in 'iz':\n raise RemFitsErr(\"Filter \" + self.filter + \" not expected to be on left of CCD\")\n if self.starty >= 1024:\n if self.filter not in 'gi':\n raise RemFitsErr(\"Filter \" + self.filter + \" not expected to be on top of CCD\")\n else:\n if self.filter not in 'rz':\n raise RemFitsErr(\"Filter \" + self.filter + \" not expected to be on bottom of CCD\")\n\n if getwcs:\n self.wcs = wcscoord.wcscoord(hdr)", "def readcif(filename, **kwds):\n \n # Read the unit cell parameters\n a, b, c, alf, bet, gam = [[]]*6\n with open(filename, 'r') as f:\n \n for line in f:\n if \"length_a\" in line:\n a = numgrab(line)\n elif \"length_b\" in line:\n b = numgrab(line)\n elif \"length_c\" in line:\n c = numgrab(line)\n elif \"angle_alpha\" in line:\n alf = numgrab(line)\n elif \"angle_beta\" in line:\n bet = numgrab(line)\n elif \"angle_gamma\" in line:\n gam = numgrab(line)\n \n crystVec = a + b + c + alf + bet + gam\n \n # Read atomic coordinates\n cifdata = pd.read_csv(filename, delim_whitespace=True, header=None, **kwds)\n atomLabels = np.array(cifdata.values[:,0], dtype='str')\n coords = np.array(cifdata.values[:,1:4]).astype('float64')\n\n return atomLabels, coords, crystVec", "def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)", "def read_kitti_Tr_velo_to_cam(filename):\n\n with open(filename) as f:\n for line in f:\n data = line.split(' ')\n if data[0] == 'Tr_velo_to_cam:':\n calib = np.array([float(x) for x in data[1:13]])\n calib = calib.reshape(3, 4)\n return _extend_matrix(calib)\n\n raise Exception(\n 'Could not find entry for P2 in calib file {}'.format(filename))", "def read_ceilometer_file(self, calibration_factor: float | None = None) -> None:\n header, data_lines = self._read_common_header_part()\n header.append(self._read_header_line_3(data_lines[3]))\n self.metadata = self._handle_metadata(header)\n self.data[\"range\"] = 
self._calc_range()\n hex_profiles = self._parse_hex_profiles(data_lines[4:20])\n self.data[\"beta_raw\"] = self._read_backscatter(hex_profiles)\n self.data[\"calibration_factor\"] = calibration_factor or 1.0\n self.data[\"beta_raw\"] *= self.data[\"calibration_factor\"]\n self.data[\"zenith_angle\"] = np.median(self.metadata[\"zenith_angle\"])", "def get_image_data(img):\n\tfrom EMAN2 import EMNumPy\n\treturn EMNumPy.em2numpy(img)", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. 
/ eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def calibration(self) -> int:", "def preprocess_image(filename, side='blue', flatcor = 'yes', \r\n remove_cosmics=True, trace=None):\r\n\r\n assert(flatcor in ['yes', 'no'])\r\n\r\n if trace is None:\r\n trace = det_pars[side]['trace']\r\n\r\n # Need to define an instrument translation file in iraf 2.16.1\r\n iraf.unlearn('setinst')\r\n iraf.setinst.instrument = 'kpnoheaders'\r\n iraf.setinst.review = 'no'\r\n iraf.setinst.mode = 'h'\r\n iraf.setinst()\r\n\r\n # bias subtraction using the overscan\r\n hdr = pyfits.getheader(filename)\r\n iraf.unlearn('ccdproc')\r\n iraf.ccdproc.zerocor = \"no\"\r\n iraf.ccdproc.flatcor = flatcor\r\n iraf.ccdproc.fixpix = \"no\"\r\n iraf.hedit(filename, 'GAIN', det_pars[side]['gain'], \r\n update=\"yes\", verify=\"no\", show=\"no\")\r\n iraf.hedit(filename, 'RON', det_pars[side]['readnoise'], \r\n update=\"yes\", verify=\"no\", show=\"no\")\r\n if side == 'blue':\r\n # update the header\r\n iraf.hedit(filename, 'DISPAXIS', 2, update=\"yes\", verify=\"no\", add=\"yes\", show=\"no\")\r\n # trim the specified region\r\n iraf.ccdproc.biassec = hdr['BSEC1']\r\n iraf.ccdproc.trimsec = \"[%d:%d,*]\" % (trace-100, trace+100)\r\n iraf.ccdproc.function = \"spline3\"\r\n iraf.ccdproc.order = 3\r\n else:\r\n # update the header\r\n iraf.hedit(filename, 'DISPAXIS', 1, update=\"yes\", verify=\"no\", add=\"yes\", show='no')\r\n # trim the specified region\r\n iraf.ccdproc.biassec = det_pars['red']['biassec']\r\n tsec_x = hdr['TSEC1'].split(',')[0]\r\n iraf.ccdproc.trimsec = tsec_x + \",%d:%d]\" % (trace-100, trace+100)\r\n iraf.ccdproc.function = \"legendre\"\r\n iraf.ccdproc.order = 1\r\n iraf.ccdproc.ccdtype = \"\"\r\n iraf.ccdproc.darkcor = \"no\"\r\n iraf.ccdproc.niterate = 3\r\n iraf.ccdproc(filename,\r\n flat=\"flat_%s_%s\" % (side, hdr['APERTURE']))\r\n\r\n if (side == 'blue') and ('FIXPIX' not in hdr):\r\n iraf.fixpix('blue????.fits', \"bluebpm\")\r\n\r\n if 'OBSERVAT' not in hdr:\r\n # update the headers\r\n iraf.asthedit(filename, BASE_DIR + '/cal/DBSP.hdr')\r\n\r\n # remove cosmic rays with LA Cosmic\r\n if remove_cosmics and ('COSMIC' not in hdr) and (hdr['EXPTIME'] > 60) and \\\r\n (hdr['TURRET'] == 'APERTURE'):\r\n array, header = pyfits.getdata(filename, header=True)\r\n c = cosmics.cosmicsimage(array, gain=det_pars[side]['gain'], \r\n readnoise=det_pars[side]['readnoise'], \r\n sigclip = 4.5, sigfrac = 0.5, objlim = 2.0, satlevel=60000,\r\n skyOrder = 0, objectOrder = 0)\r\n c.run(maxiter = 3)\r\n #header.update('COSMIC', 1, '1 if we ran LA Cosmic')\r\n header['COSMIC']= 1\r\n pyfits.writeto(filename, c.cleanarray, header, clobber=True)", "def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = 
cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2", "def load_charmm_ff_params(fname):\n with open(fname) as f:\n lines = f.readlines()\n\n comment_stripper = re.compile(r'[!\\*].*')\n ffp = ForceFieldParams(fname)\n\n current_section = None\n for i in range(len(lines)):\n # Ignore comments and blank lines\n line = comment_stripper.sub('', lines[i].strip())\n if line == '': continue\n\n tokens = line.split()\n skip_line = False\n for section in ('ATOM', 'BOND', 'ANGL', 'DIHE', 'IMPR', 'NONB', 'CMAP'):\n if tokens[0].startswith(section):\n current_section = section\n skip_line = True\n break\n\n if skip_line: continue\n\n if current_section is 'BOND':\n key1, key2 = key_names((tokens[0], tokens[1]))\n ffp.bonds[key1] = ffp.bonds[key2] = {\n 'force_constant': float(tokens[2]),\n 'equilibrium_distance': float(tokens[3])\n }\n elif current_section is 'ANGL':\n # TODO: Urey-Bradley terms\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2]))\n ffp.angles[key1] = ffp.angles[key2] = {\n 'force_constant': float(tokens[3]),\n 'equilibrium_angle': float(tokens[4]) * pi / 180.0\n }\n elif current_section is 'DIHE':\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n ffp.dihedrals[key1] = ffp.dihedrals[key2] = {\n 'force_constant': float(tokens[4]),\n 'multiplicity': float(tokens[5]),\n 'delta': float(tokens[6])\n }\n elif current_section is 'IMPR':\n key = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n else:\n # Unknown line type\n continue\n return ffp", "def get_efermi(fn):\n try:\n f = open(fn)\n except:\n return 0\n line = f.readline()\n f.close()\n ef = float(line.split()[6])\n print('Calculated Fermi level: {0}'.format(ef))\n return ef", "def read_core_vref(self) -> float:", "def test_fc(self):\n self.assertEqual(self.nhf.metadata[\"ndim\"], 3)\n self.assertEqual(self.nhf.metadata[\"ngroup\"], 4)\n self.assertEqual(self.nhf.metadata[\"ninti\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintj\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintk\"], 6)\n self.assertEqual(self.nhf.metadata[\"nSurf\"], 6)\n self.assertEqual(self.nhf.metadata[\"nMom\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintxy\"], 19)\n self.assertEqual(self.nhf.metadata[\"npcxy\"], 144)\n self.assertEqual(self.nhf.metadata[\"iaprx\"], 4)\n self.assertEqual(self.nhf.metadata[\"iaprxz\"], 3)\n\n variantControlInfo = nhflux.FILE_SPEC_1D_KEYS_VARIANT11\n for info in variantControlInfo:\n self.assertTrue(info not in self.nhf.metadata)", "def coeff_display_M202(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=0.,y=0.,z=0.,zernike_max_order=20,regular=False):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n img = hdui.data[i][4:].reshape(npix,npix)\n img = rebin(img,(40,40))\n M20,M22,M31,M33=complexMoments(data=img,sigma=4.)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data) \n betaAll=[]\n betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj,fitted = 
zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n ind = np.arange(len(betaAll[0]))\n momname = ('M20','M22.Real','M22.imag','M31.real','M31.imag','M33.real','M33.imag')\n fmtarr = ['bo-','ro-','go-','co-','mo-','yo-','ko-']\n pl.figure(figsize=(17,7))\n for i in range(3):\n pl.subplot(4,1,i+1)\n pl.errorbar(ind[1:],betaAll[i][1:],yerr = betaErrAll[i][1:],fmt=fmtarr[i])\n if i == 0:\n pl.title('x: '+str(hdu[0].header['x'])+' y: '+str(hdu[0].header['y'])+' z: '+str(hdu[0].header['z'])+' tilt: '+str(hdu[0].header['theta'])+' fwhm: '+str(hdu[0].header['s_fwhm'])+' e1: '+str(hdu[0].header['e1'])+' e2: '+str(hdu[0].header['e2']))\n pl.grid()\n pl.xlim(-1,len(betaAll[i])+1)\n pl.ylim(min(betaAll[i][1:])-0.5,max(betaAll[i][1:])+0.5)\n #pl.ylim(-0.1,0.1)\n pl.xticks(ind,('','','','','','','','','','','','','','','','','','','',''))\n pl.ylabel(momname[i])\n pl.xticks(ind,('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'),rotation=90)\n pl.xlabel('Zernike Coefficients')\n return betaAll,betaErrAll", "def readParams(file_name):\n try:\n info = np.load(file_name,allow_pickle=True)[()]\n except FileNotFoundError:\n if file_name.split('/')[-2] == 'checkpoint':\n lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'\n file_name = lfc_id_dir + os.path.basename(file_name)\n info = np.load(file_name,allow_pickle=True)[()]\n else:\n raise FileNotFoundError\n # Assemble information into \"fit-able\" form\n num_orders = len(info['params'])\n lines = [p[:,1] for p in info['params'] if p is not None]\n errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]\n ordrs = [o for o in np.arange(86) if info['params'][o] is not None]\n waves = [w for w in info['wvln'] if w is not None]\n # I believe, but am not sure, that the wavelengths are multiplied by order\n # to separate them from when orders overlap at the edges\n waves = [wvln for order, wvln in zip(ordrs,waves)]\n ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]\n\n x = np.concatenate(lines)\n y = np.concatenate(ordrs)\n e = np.concatenate(errs)\n w = np.concatenate(waves)\n # Note: default of pipeline includes ThAr lines, which we're not including here\n \n return (x,y,w,e)", "def FeatureExtraction(Label, In, Ic, W, K=128, Fs=6, Delta=8):\n\n # get total regions\n NumofLabels = Label.max()\n\n # get Label size x\n size_x = Label.shape[0]\n\n # initialize centroids\n CentroidX = []\n CentroidY = []\n\n # initialize morphometry features\n Area = []\n Perimeter = []\n Eccentricity = []\n Circularity = []\n MajorAxisLength = []\n MinorAxisLength = []\n Extent = []\n Solidity = []\n\n # initialize FSD feature group\n FSDGroup = np.zeros((NumofLabels, Fs))\n\n # initialize Nuclei, Cytoplasms\n Nuclei = [[] for i in range(NumofLabels)]\n Cytoplasms = [[] for i in range(NumofLabels)]\n\n # create round structuring 
element\n Disk = disk(Delta)\n\n # initialize panda dataframe\n df = pd.DataFrame()\n\n # fourier descriptors, spaced evenly over the interval 1:K/2\n Interval = np.round(\n np.power(\n 2, np.linspace(0, math.log(K, 2)-1, Fs+1, endpoint=True)\n )\n ).astype(np.uint8)\n\n # extract feature information\n for region in regionprops(Label):\n # add centroids\n CentroidX = np.append(CentroidX, region.centroid[0])\n CentroidY = np.append(CentroidY, region.centroid[1])\n # add morphometry features\n Area = np.append(Area, region.area)\n Perimeter = np.append(Perimeter, region.perimeter)\n Eccentricity = np.append(Eccentricity, region.eccentricity)\n if region.perimeter == 0:\n Circularity = np.append(Circularity, 0)\n else:\n Circularity = np.append(\n Circularity,\n 4 * math.pi * region.area / math.pow(region.perimeter, 2)\n )\n MajorAxisLength = np.append(MajorAxisLength, region.major_axis_length)\n MinorAxisLength = np.append(MinorAxisLength, region.minor_axis_length)\n Extent = np.append(Extent, region.extent)\n Solidity = np.append(Solidity, region.solidity)\n # get bounds of dilated nucleus\n bounds = GetBounds(region.bbox, Delta, size_x)\n # grab nucleus mask\n Nucleus = (\n Label[bounds[0]:bounds[1], bounds[2]:bounds[3]] == region.label\n ).astype(np.uint8)\n # find nucleus boundaries\n Bounds = np.argwhere(\n find_boundaries(Nucleus, mode=\"inner\").astype(np.uint8) == 1\n )\n # calculate and add FSDs\n FSDGroup[region.label-1, :] = FSDs(\n Bounds[:, 0], Bounds[:, 1],\n K, Interval\n )\n # generate object coords for nuclei and cytoplasmic regions\n Nuclei[region.label-1] = region.coords\n # get mask for all nuclei in neighborhood\n Mask = (\n Label[bounds[0]:bounds[1], bounds[2]:bounds[3]] > 0\n ).astype(np.uint8)\n # remove nucleus region from cytoplasm+nucleus mask\n cytoplasm = (\n np.logical_xor(Mask, dilation(Nucleus, Disk))\n ).astype(np.uint8)\n # get list of cytoplasm pixels\n Cytoplasms[region.label-1] = GetPixCoords(cytoplasm, bounds)\n\n # calculate hematoxlyin features, capture feature names\n HematoxylinIntensityGroup = IntensityFeatureGroup(In, Nuclei)\n HematoxylinTextureGroup = TextureFeatureGroup(In, Nuclei)\n HematoxylinGradientGroup = GradientFeatureGroup(In, Nuclei)\n # calculate eosin features\n EosinIntensityGroup = IntensityFeatureGroup(Ic, Cytoplasms)\n EosinTextureGroup = TextureFeatureGroup(Ic, Cytoplasms)\n EosinGradientGroup = GradientFeatureGroup(Ic, Cytoplasms)\n\n # add columns to dataframe\n df['X'] = CentroidX\n df['Y'] = CentroidY\n\n df['Area'] = Area\n df['Perimeter'] = Perimeter\n df['Eccentricity'] = Eccentricity\n df['Circularity'] = Circularity\n df['MajorAxisLength'] = MajorAxisLength\n df['MinorAxisLength'] = MinorAxisLength\n df['Extent'] = Extent\n df['Solidity'] = Solidity\n\n for i in range(0, Fs):\n df['FSD' + str(i+1)] = FSDGroup[:, i]\n\n for f in HematoxylinIntensityGroup._fields:\n df['Hematoxylin' + f] = getattr(HematoxylinIntensityGroup, f)\n\n for f in HematoxylinTextureGroup._fields:\n df['Hematoxylin' + f] = getattr(HematoxylinTextureGroup, f)\n\n for f in HematoxylinGradientGroup._fields:\n df['Hematoxylin' + f] = getattr(HematoxylinGradientGroup, f)\n\n for f in EosinIntensityGroup._fields:\n df['Cytoplasm' + f] = getattr(EosinIntensityGroup, f)\n\n for f in EosinTextureGroup._fields:\n df['Cytoplasm' + f] = getattr(EosinTextureGroup, f)\n\n for f in EosinGradientGroup._fields:\n df['Cytoplasm' + f] = getattr(EosinGradientGroup, f)\n\n return df", "def mce_filter(freq, f_raw, params):\n\tz = np.exp(-2j*np.pi*freq/f_raw)\n\tb11, 
b12, b21, b22 = np.array(params[:4])*0.5**14\n\tH = (1+z)**4 / (1-b11*z+b12*z**2) / (1-b21*z+b22*z**2)\n\tH /= 2**4 / (1-b11+b12) / (1-b21+b22)\n\treturn H", "def read_param_phil(self):\n\n # LABELIT target file settings\n if self.target_phil is None:\n self.write_default_phil()\n self.phil.ctr.SetValue(self.target_phil)\n\n # Resolution limits\n # \"Try/except\" for backwards compatibility\n try:\n lowres = self.params.cctbx_ha14.resolution_limits.low\n hires = self.params.cctbx_ha14.resolution_limits.high\n self.res_limits.lowres.SetValue(str(lowres))\n self.res_limits.hires.SetValue(str(hires))\n except AttributeError:\n pass\n\n # Target options\n # \"Try/except\" for backwards compatibility\n try:\n t_uc = self.params.cctbx_ha14.target_unit_cell\n t_lat = self.params.cctbx_ha14.target_lattice_type\n l_idx = self.target_lattice.ctr.FindString(str(t_lat))\n t_ctype = self.params.cctbx_ha14.target_centering_type\n if t_ctype == 'P':\n c_idx = 1\n elif t_ctype == 'C':\n c_idx = 2\n elif t_ctype == 'I':\n c_idx = 3\n elif t_ctype == 'R':\n c_idx = 4\n elif t_ctype == 'F':\n c_idx = 5\n else:\n c_idx = 0\n if t_uc is not None:\n uc_str = [str(i) for i in t_uc.parameters()]\n self.target_uc.cell.SetValue(' '.join(uc_str))\n self.target_lattice.ctr.SetSelection(l_idx)\n self.target_centering.ctr.SetSelection(c_idx)\n except AttributeError:\n pass\n\n # Grid search options\n idx = self.gs_type.ctr.FindString(self.params.cctbx_ha14.grid_search.type)\n self.set_grid_search(idx=idx)\n self.signal_search.SetValue(self.params.cctbx_ha14.grid_search.sig_height_search)\n\n # # Selection options\n # self.select_only.SetValue(self.params.cctbx_ha14.selection.select_only.flag_on)\n # self.img_objects_path.Enable(self.select_only.GetValue())\n\n idx = self.select_by.ctr.FindString(self.params.cctbx_ha14.selection.select_by)\n self.select_by.ctr.SetSelection(idx)\n\n self.min_sigma.sigma.SetValue(str(self.params.cctbx_ha14.selection.min_sigma))\n\n # Selection filters\n if self.params.cctbx_ha14.selection.prefilter.flag_on:\n pg = self.params.cctbx_ha14.selection.prefilter.target_pointgroup\n ut = self.params.cctbx_ha14.selection.prefilter.target_uc_tolerance\n rs = self.params.cctbx_ha14.selection.prefilter.min_resolution\n rf = self.params.cctbx_ha14.selection.prefilter.min_reflections\n if self.params.cctbx_ha14.selection.prefilter.target_unit_cell is not None:\n try:\n uc = self.params.cctbx_ha14.selection.prefilter.target_unit_cell.parameters()\n except AttributeError:\n uc = None\n else:\n uc = None\n\n if str(pg).lower() != 'none':\n self.filt_lattice.toggle_boxes()\n self.filt_lattice.lattice.SetValue(str(pg))\n if str(uc).lower() != 'none':\n self.filt_uc.toggle_boxes()\n self.filt_uc.a.SetValue(str(uc[0]))\n self.filt_uc.b.SetValue(str(uc[1]))\n self.filt_uc.c.SetValue(str(uc[2]))\n self.filt_uc.alpha.SetValue(str(uc[3]))\n self.filt_uc.beta.SetValue(str(uc[4]))\n self.filt_uc.gamma.SetValue(str(uc[5]))\n self.filt_uc.tolerance.SetValue(str(ut))\n if str(rs).lower() != 'none':\n self.filt_res.toggle_boxes()\n self.filt_res.res.SetValue(str(rs))\n if str(rf).lower() != 'none':\n self.filt_ref.toggle_boxes()\n self.filt_ref.ref.SetValue(str(rf))", "def cam_read(filename):\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? 
'.format(TAG_FLOAT,check)\n M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))\n N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))\n return M,N", "def test_mag_form_fac_case1():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac()[0], ion.calc_mag_form_fac()[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)", "def _decode_header(self):\n #header = self.file_content[0:6]\n log_screen_descr = self.file_content[6:13]\n self.canvas_width = log_screen_descr[0] + (log_screen_descr[1]<<8)\n self.canvas_height = log_screen_descr[2] + (log_screen_descr[3]<<8)\n # is there a global color table? (usually yes)\n flags = log_screen_descr[4]\n self.glob_col_table = (flags & 0b10000000) != 0\n\n # determine the number of bits per primary color value\n self.color_resolution = (flags & 0b01110000) >> 4\n self.bits_per_pixel = self.color_resolution + 1\n\n # If the value is 1, then the colors in the global color table are sorted\n # in order of \"decreasing importance,\" which typically means \"decreasing\n # frequency\" in the image\n self.sort_flag = (flags & 0b00001000) != 0\n\n # If this value is N, then the actual table size is 2^(N+1).\n self.glob_col_table_sz = 1 << ((flags & 0b00000111)+1)\n\n self.bg_color_index = log_screen_descr[5]\n self.pix_asp_ratio = log_screen_descr[6]", "def _read_calibration_data(self):\n #Declare global variables.\n global calT1\n global calT2\n global calT3\n global calP1\n global calP2\n global calP3\n global calP4\n global calP5\n global calP6\n global calP7\n global calP8\n global calP9\n global calP10\n global calH1\n global calH2\n global calH3\n global calH4\n global calH5\n global calH6\n global calH7\n global calGH1\n global calGH2\n global calGH3\n global calResHeatRange\n global calResHeatVal\n global calRangeSwErr\n\n #Temperature calibration.\n calT1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_T1_LSB_REG)\n calT2 = self._read_2bytes_as_short_lsbfirst(self.BME680_T2_LSB_REG)\n calT3 = self._read_register_1sbyte(self.BME680_T3_REG)\n\n #Pressure calibration.\n calP1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_P1_LSB_REG)\n calP2 = self._read_2bytes_as_short_lsbfirst(self.BME680_P2_LSB_REG)\n calP3 = self._read_register_1sbyte(self.BME680_P3_REG)\n calP4 = self._read_2bytes_as_short_lsbfirst(self.BME680_P4_LSB_REG)\n calP5 = self._read_2bytes_as_short_lsbfirst(self.BME680_P5_LSB_REG)\n calP6 = self._read_register_1sbyte(self.BME680_P6_REG)\n calP7 = self._read_register_1sbyte(self.BME680_P7_REG)\n calP8 = self._read_2bytes_as_short_lsbfirst(self.BME680_P8_LSB_REG)\n calP9 = self._read_2bytes_as_short_lsbfirst(self.BME680_P9_LSB_REG)\n calP10 = self._read_register_1ubyte(self.BME680_P10_REG)\n\n #Humidity calibration.\n calH1 = self._read_register_1ubyte(self.BME680_H1_MSB_REG) << 4 | (self._read_register_1ubyte(self.BME680_H1_LSB_REG) & 0x0F)\n calH2 = self._read_register_1ubyte(self.BME680_H2_MSB_REG) << 4 | ((self._read_register_1ubyte(self.BME680_H2_LSB_REG)) >> 4)\n calH3 = self._read_register_1sbyte(self.BME680_H3_REG)\n calH4 = self._read_register_1sbyte(self.BME680_H4_REG)\n calH5 = self._read_register_1sbyte(self.BME680_H5_REG)\n calH6 = self._read_register_1ubyte(self.BME680_H6_REG)\n calH7 = self._read_register_1sbyte(self.BME680_H7_REG)\n\n #Gas calibration.\n calGH1 = self._read_register_1sbyte(self.BME680_GH1_REG)\n calGH2 = self._read_2bytes_as_short_lsbfirst(self.BME680_GH2_LSB_REG)\n calGH3 = self._read_register_1sbyte(self.BME680_GH3_REG)\n\n #Heat calibration.\n 
calResHeatRange = (self._read_register_1ubyte(self.BME680_RES_HEAT_RANGE) & 0x30) / 16\n calResHeatVal = self._read_register_1sbyte(self.BME680_RES_HEAT_VAL)\n calRangeSwErr = (self._read_register_1sbyte(self.BME680_RANGE_SW_ERR) & 0xF0) / 16", "def _readFT1(self):\n\n mainHead = fits.getheader(self._ft1, ext=0)\n dataHead = fits.getheader(self._ft1, ext=1)\n irfsPass = {'P8R2': 'P8R2_SOURCE_V6'}\n\n # --------------------------------------------------------------------------------------------- #\n # Fill the class attributes with informations from the data file\n if os.path.dirname(self.ft1) == '':\n # Get the absolute name to the current path\n self.datapath = os.getcwd()\n self.workpath = os.getcwd() # by default it's the same\n else:\n self.datapath = os.path.dirname(self.ft1)\n self.workpath = os.getcwd() #os.path.dirname(self.ft1)\n if 'NoPulse' in os.path.basename(self.ft1):\n self.frac = float(os.path.basename(self.ft1).split('_')[-1].replace('.fits', '')) \n self.tstart = Time(mainHead['DATE-OBS'], format='isot', scale='utc')\n self.tstop = Time(mainHead['DATE-END'], format='isot', scale='utc')\n self.metstart = mainHead['TSTART']\n self.metstop = mainHead['TSTOP']\n for i in dataHead.keys():\n if 'DSTYP' in i:\n if 'ENERGY' in dataHead[i]:\n self.emin = float(dataHead['DSVAL'+i[-1]].split(':')[0])\n self.emax = float(dataHead['DSVAL'+i[-1]].split(':')[1])\n break\n self.nevents = dataHead['NAXIS2']\n self.passver = dataHead['PASS_VER']\n try:\n self.irf = irfsPass[self.passver]\n except:\n print(\"\\t=== self.irf needs to be filled manually===\")\n for i in dataHead.keys():\n if isinstance(dataHead[i], str):\n if 'BIT_MASK(EVENT_CLASS' in dataHead[i]:\n self.evclass = dataHead['DSTYP'+i[-1]].split(',')[1]\n break\n for i in dataHead.keys():\n if isinstance(dataHead[i], str):\n if 'BIT_MASK(EVENT_TYPE' in dataHead[i]:\n self.evtype = dataHead['DSTYP'+i[-1]].split(',')[1]\n break\n else:\n self.evtype = None\n firstFound = False\n for i in dataHead.keys():\n if (dataHead[i] == 'POS(RA,DEC)') and (not firstFound):\n pointingInfo = dataHead['DSVAL'+i[-1]].split('(')[1].split(')')[0].split(',')\n firstFound = True \n elif (dataHead[i] == 'POS(RA,DEC)') and (firstFound):\n # The FT1 has two positions informations (classic...)\n # Need to remove the second one \n print(\"\\t=== Multiple central postions found, removing the secondary ===\")\n hdus = fits.open(self._ft1)\n hdus['EVENTS'].header.remove('DSTYP' + i[-1])\n hdus['EVENTS'].header.remove('DSUNI' + i[-1])\n hdus['EVENTS'].header.remove('DSVAL' + i[-1])\n # Rename the remaining keywords\n for j in dataHead.keys():\n if j[:5] in ['DSVAL', 'DSUNI', 'DSTYP', 'DSREF']:\n if int(j[-1]) > int(i[-1]):\n hdus['EVENTS'].header.rename_keyword(j, j[:5]+str(int(j[-1])-1), force=True)\n hdus['EVENTS'].header.set('NDSKEYS', hdus['EVENTS'].header['NDSKEYS']-1)\n hdus.writeto(self._ft1, clobber=True) # replace the existing FT1\n break\n else:\n pass\n self.ra = float(pointingInfo[0])\n self.dec = float(pointingInfo[1])\n self.rad = float(pointingInfo[2])\n if 'FT1_filtered' not in os.path.basename(self.ft1):\n # It's not at least a processed filtered FT1 file\n self.fermicat = os.path.join(self.datapath, 'gll_psc_v16.fit')\n self.model = os.path.join(self.datapath, os.path.basename(self.ft1[:-5])+'_Model.xml')\n return", "def preprocess(image):\n return (image / 255) * 2 - 1", "def read_is(filename):\n with open(filename, 'rb') as f:\n print(f'Reading {filename}')\n print(f'Reading Header...')\n is_type = [struct.unpack('c', 
f.read(1))[0].decode('utf-8')\n for i in range(4)]\n is_type = ''.join(is_type)\n if is_type not in ['IS01', 'IS02', 'IS03']:\n print(f'{is_type} : Invalid IS type, please check that '\n 'input file is a Inverse Solution matrix')\n raise ValueError\n print(f'IS type: {is_type}')\n n_channels = struct.unpack('I', f.read(4))[0]\n print(f'n_channels: {n_channels}')\n numsolutionpoints = struct.unpack('I', f.read(4))[0]\n print(f'n_solutionpoints: {numsolutionpoints}')\n numregularizations = struct.unpack('I', f.read(4))[0]\n print(f'n_regularizations: {numregularizations}')\n isinversescalar = struct.unpack('c', f.read(1))[0]\n if isinversescalar == b'\\x01':\n n_dim = 1\n print(f'Inverse solution is Scalar')\n elif isinversescalar == b'\\x00':\n print(f'Inverse solution is Vectorial')\n n_dim = 3\n else:\n raise ValueError(f'isinversescalar must be either 1 for scalar, '\n f'either 0 for vectorial, but '\n f'{ord(isinversescalar)} found.')\n\n if is_type in ['IS01', 'IS02']:\n buf = f.read(n_dim * numsolutionpoints * n_channel * 4)\n data = np.frombuffer(buf, dtype=np.float32)\n data = data.reshape(numsolutionpoints, ndim, n_channel)\n data = no.array([data])\n data = np.swapaxes(data, 1, 2)\n\n elif is_type == 'IS03':\n print(f\"Reading Variable Header...\")\n\n ch_names = []\n for _ in range(n_channels):\n name = [char for char in f.read(32).split(b'\\x00')\n if char != b''][0]\n ch_names.append(name.decode('utf-8'))\n\n solutionpoints_names = []\n for _ in range(numsolutionpoints):\n name = [char for char in f.read(16).split(b'\\x00')\n if char != b''][0]\n solutionpoints_names.append(name.decode('utf-8'))\n\n regularizations_values = []\n for _ in range(numregularizations):\n value = struct.unpack('d', f.read(8))[0]\n regularizations_values.append(value)\n print(f'Regularizations values: {regularizations_values}')\n\n regularizations_names = []\n for _ in range(numregularizations):\n name = [char for char in f.read(32).split(b'\\x00')\n if char != b''][0]\n regularizations_names.append(name.decode('utf-8'))\n print(f'Regularizations names: {regularizations_names}')\n\n regularisation_solutions = []\n buf = f.read(numregularizations\n * n_dim\n * numsolutionpoints\n * n_channels\n * 4)\n data = np.frombuffer(buf, dtype=np.float32)\n data = data.reshape(numregularizations, numsolutionpoints,\n n_dim, n_channels)\n data = np.swapaxes(data, 1, 2)\n\n regularisation_solutions = np.array(regularisation_solutions)\n inverse_solution = {'is_type': is_type,\n 'is_scalar': True if isinversescalar == \"0\" else False,\n 'ch_names': ch_names,\n 'solutionpoints_names': solutionpoints_names,\n 'regularizations_values': regularizations_values,\n 'regularizations_names': regularizations_names,\n 'regularisation_solutions': data}\n return(inverse_solution)", "def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): 
header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == 
\"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array", "def feature_extraction(img, feature):\n\n if feature == 'HoG':\n # HoG parameters\n\n # In the case of the Hog Feature, we already given the base parameters for using hog feature function.\n # TA - You can just use that parameter with each subdivide image (which has image grid size * image grid size)\n # Thank you for the reply. Does it mean to divide the image into 20x20 size sub-images and perform the feature extraction on each image??\n # TA - Yes. In the SIFT, image grid size is different.\n\n win_size = (32, 32)\n block_size = (32, 32)\n block_stride = (16, 16)\n cell_size = (16, 16)\n\n nbins = 9\n deriv_aperture = 1\n win_sigma = 4\n histogram_norm_type = 0\n l2_hys_threshold = 2.0000000000000001e-01\n gamma_correction = 0\n nlevels = 64\n\n # Your code here. You should also change the return value.\n\n # sample visualizing\n # cv2.imshow('img', img)\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n hog = cv2.HOGDescriptor(win_size,\n block_size,\n block_stride,\n cell_size,\n nbins,\n deriv_aperture,\n win_sigma,\n histogram_norm_type,\n l2_hys_threshold,\n gamma_correction,\n nlevels)\n\n # additional parameters\n\n #hist = hog.compute(gray,winStride,padding,locations)\n\n #TODO: Check if this is valid???\n\n hist = hog.compute(gray)\n hist_resized = np.resize(hist, (int(len(hist)/36), 36))\n hist_resized\n return hist_resized\n\n elif feature == 'SIFT':\n\n # Your code here. You should also change the return value.\n\n #input image size 240 * 200 ==> divide H, W by 20 ==> 12 * 10 = 120\n #in case of this input image, the number of feature is 120.\n #So the number of feature is changed according to input image size.\n\n #IF PROBLEMS WITH DEPENDENCIES: pip3 install opencv-contrib-python==3.4.2.16\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(gray, None)\n\n return des", "def test_find_details(self):\n input_image = np.array([\n 0.01, 0.1, 0.2, 0.5, 0.75, 0.99\n ])\n blur_input_image = np.array([\n 0.28641213, 0.32315277, 0.3871898, 0.46174035, 0.52684723, 0.56466555\n ])\n expected_image_lower = np.array([\n -0.277, -0.224, -0.188, 0.038, 0.223, 0.425\n ])\n expected_image_upper = np.array([\n -0.276, -0.223, -0.187, 0.039, 0.224, 0.426\n ])\n output = localHDR.find_details(input_image, blur_input_image)\n self.assertTrue(np.allclose(output, expected_image_lower, atol=6e-03))\n self.assertTrue(np.allclose(output, expected_image_upper, atol=6e-03))" ]
[ "0.56780773", "0.56284475", "0.55668586", "0.5510405", "0.5448828", "0.5425909", "0.5414353", "0.54138607", "0.541011", "0.54011285", "0.5374928", "0.5361909", "0.5342145", "0.533246", "0.5314134", "0.5313142", "0.525259", "0.52504194", "0.5208928", "0.520613", "0.5202701", "0.51924586", "0.51777726", "0.51571214", "0.51507735", "0.512887", "0.5126869", "0.5121457", "0.5117099", "0.510186", "0.50991404", "0.5093974", "0.50879806", "0.50853753", "0.50812507", "0.5079875", "0.506039", "0.50534445", "0.5035637", "0.5034339", "0.50274587", "0.50213623", "0.5017874", "0.50161785", "0.50136745", "0.50068176", "0.49998462", "0.4989725", "0.4986154", "0.4985155", "0.49851128", "0.49839422", "0.49823543", "0.49666095", "0.4952434", "0.4951739", "0.49516848", "0.49515092", "0.49459973", "0.49401253", "0.4939845", "0.49394304", "0.4938727", "0.4937046", "0.49356616", "0.4931594", "0.49313563", "0.49273455", "0.49231133", "0.49228832", "0.4921687", "0.49198458", "0.4914381", "0.49011046", "0.4900983", "0.48999843", "0.489974", "0.4895046", "0.48874557", "0.48857233", "0.48846942", "0.4880707", "0.4880666", "0.48765913", "0.48685646", "0.4864707", "0.4864021", "0.48624763", "0.4861035", "0.48591116", "0.48570734", "0.48566633", "0.48512194", "0.48501083", "0.48497415", "0.48470283", "0.48433885", "0.48400554", "0.48314825", "0.4829988" ]
0.60774076
0
Generate an EMAN2 CTF object using the values of the CTF parameters given in the list p.
def generate_ctf(p):
	from EMAN2 import EMAN2Ctf

	defocus      = p[0]
	cs           = p[1]
	voltage      = p[2]
	pixel_size   = p[3]
	bfactor      = p[4]
	amp_contrast = p[5]

	if defocus > 100:  # which means it is very likely in Angstrom, therefore we are using the old convention
		defocus *= 1e-4

	if amp_contrast < 1.0:
		from math import sqrt
		amp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)

	ctf = EMAN2Ctf()
	if(len(p) == 6):
		ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast})
	else:
		ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast,'dfdiff':p[6],'dfang':p[7]})
	return ctf
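A minimal usage sketch for the function above (the parameter values are hypothetical, and it assumes EMAN2 is installed and generate_ctf is importable from the surrounding utilities module):

# Hypothetical 6-element parameter list: [defocus (microns), cs (mm), voltage (kV), pixel size (Angstrom), B-factor, amplitude contrast (%)]
p = [2.0, 2.0, 300.0, 1.1, 0.0, 10.0]
ctf = generate_ctf(p)
# The returned EMAN2Ctf object exposes the stored values as attributes
print(ctf.defocus, ctf.cs, ctf.voltage, ctf.apix, ctf.bfactor, ctf.ampcont)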
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_thermo(T_C, p):\n\ty = y_from_p(p)\n\tx = x_from_Tp(T_C+C_to_K, p)\n\treturn x, y", "def construct_param_dict(params,K_RC,K_CP,m_P):\n ###scaling constants\n w=params['w']\n pd=params['pd'] # in 3D and 0.21 in 2D\n pv=params['pv']\n Er=params['Er'] ;Ek=params['Ek']\n ER=params['ER'];EC=params['EC'];EP=params['EP'];\n Eq1=params['Eq1'];Eq2=params['Eq2']\n\n\n #capture success function\n a = params['a']\n b = params['b']\n c = params['c']\n formC = params['formC']\n formPC = params['formPC']\n formPR = params['formPR']\n \n ###variables\n TR= params['TR'] ;TC= params['TC'];TP=params['TP'];D_R= params['D_R']; D_C= params['D_C']\n K_RP=K_RC*K_CP\n fmC=params['fmC'];thermyR=params['thermyR']\n thermyC=params['thermyC'];thermyP=params['thermyP']\n fmPR=params['fmPR']\n fmPC=params['fmPC']\n m_C = K_CP*m_P;m_R = K_RP*m_P\n ###normalization constants and boltzmann constant\n r0 = params['r0']\n k0 = params['k0'] # will depend on the productivity of the habitat\n a01 = a02 = params['a012'] # will depedend on the dimension of the habitat \n a03 = params['a03']\n d0= params['d0']\n q10 = params['q10'];q20 = params['q20'];\n v0R = params['v0R'];v0C =params['v0C'];v0P =params['v0P'];k = b_k\n hC0 = params['hC0'];hP0 = params['hP0'] \n \n #intrapopulation parameters\n q1=set_q1(q10,m_C,w,Eq1,TR,k)\n q2=set_q2(q20,m_P,w,Eq2,TC,k)\n K=set_K(k0,m_R,w,Ek,TR,k)\n r=set_r(r0,m_R,w,Er,TR,k)\n\n #interpopulation parameters\n a1=set_alfa(m_C,a01,K_RC,pv,pd,TR,TC,ER,EC,D_R,v0R,v0C,g,alfa,fmC,thermyR,thermyC,k,a,b,c,formC)\n a2=set_alfa(m_P,a02,K_RP,pv,pd,TR,TP,ER,EP,D_R,v0R,v0P,g,alfa,fmPR,thermyR,thermyP,k,a,b,c,formPR)\n a3=set_alfa(m_P,a03,K_CP,pv,pd,TC,TP,EC,EP,D_C,v0C,v0P,g,alfa,fmPC,thermyC,thermyP,k,a,b,c,formPC)\n\n t_hp = set_th(hP0,m_P,w,EP,k,TP)\n t_hc = set_th(hC0,m_C,w,EC,k,TC)\n param_dict={'q1':q1,'q2':q2,'K':K,'r':r,'a1':a1,'a2':a2,'a3':a3,'t_hp':t_hp,'t_hc':t_hc}\n \n return param_dict", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 
/(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n 
self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def second_class_tp(p,n):\n c = np.zeros(n)\n d = np.zeros(p)\n ucon = np.zeros(n)\n lcon = np.zeros(n)\n \n #uvar = np.ones(n)*1\n uvar = np.ones(n)*5\n lvar = np.ones(n)*0.5\n name = str(p)+'_'+str(n)+'_'+str(n)+'_l1_tp'+'.txt'\n #name = str(n)+'_'+str(p)+'_'+'_second_tp'+'.txt'\n Q = rog.hilb(p,n)\n # d=(di), di=sum qij for i= 1,...,p\n for i in range(p): \n d[i]= Q[i,:].sum()\n B = np.zeros((n,n))\n return Q,B,d,c,lcon,ucon,lvar,uvar,name", "def 
addPppParams(model):\n \n ### GAPDP Parameters ####\n model.addParameter('GAPDP','KmSub2',0.385) # nadp\n model.addParameter('GAPDP','KmProd2',0.202) # nadph\n model.addParameter('GAPDP','kcatF',2.8)\n model.addParameter('GAPDP','kcatR',0)\n\n ### FMETTRS Parameters ###\n model.addParameter('FMETTRS','kcatF',0.45)\n\n ### MTHFC Parameters ###\n model.addParameter('MTHFC','kcatF',185)\n\n #### GHMT2 Paramters ####\n model.addParameter('GHMT2','kcatF',0.0)\n model.addParameter('GHMT2','kcatR',0.0)\n \n #### TKT1 Parameters ####\n model.addParameter('TKT1',rxnFormKey='kcatF',value=20.58)\n model.addParameter('TKT1',rxnFormKey='kcatR',value=0.8)\n \n model.addParameter('TKT1',rxnFormKey='KmSub1',value=0.743) #g3p\n model.addParameter('TKT1',rxnFormKey='KmSub2',value=3.7298) #s7p\n model.addParameter('TKT1',rxnFormKey='KmProd1',value=0.4717) #r5p\n model.addParameter('TKT1',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TKT2 Parameters ####\n model.addParameter('TKT2',rxnFormKey='kcatF',value=26.87)\n model.addParameter('TKT2',rxnFormKey='kcatR',value=1.4)\n \n model.addParameter('TKT2',rxnFormKey='KmSub1',value=0.25) #f6p\n model.addParameter('TKT2',rxnFormKey='KmSub2',value=0.743) #g3p\n model.addParameter('TKT2',rxnFormKey='KmProd1',value=0.0227) #e4p\n model.addParameter('TKT2',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TALA Parameters ####\n model.addParameter('TALA',rxnFormKey='kcatF',value=22.3)\n model.addParameter('TALA',rxnFormKey='kcatR',value=0.54)\n \n model.addParameter('TALA',rxnFormKey='KmSub1',value=0.0401) #e4p\n model.addParameter('TALA',rxnFormKey='KmSub2',value=0.6688) #f6p\n model.addParameter('TALA',rxnFormKey='KmProd1',value=1.9) #g3p\n model.addParameter('TALA',rxnFormKey='KmProd2',value=0.285) #s7p\n\n \n #### Speed up DGSN Pathway ####\n model.addParameter('DGSNK',rxnFormKey='kcatF',value=2.25)\n\n #### Speed up DADN pathway ####\n model.addParameter('PUNP2',rxnFormKey='kcatF',value=13.3)\n\n #### Speed up FBA rxn ####\n #model.addParameter('FBA',rxnFormKey='kcatF',value=64.5)\n\n model.addParameter('RNDR2',rxnFormKey='KmSub1',value=0.24)\n\n \n# #### RPI Parameters ####\n model.addParameter('RPI',rxnFormKey='kcatF',value=10.0)\n model.addParameter('RPI',rxnFormKey='kcatR',value=1.0)\n \n #model.addParameter('RPI',rxnFormKey='KmSub1',value=1.0)\n #model.addParameter('RPI',rxnFormKey='KmProd1',value=1.0)\n \n model.addParameter('FBA',rxnFormKey='KmSub1',value=0.12)\n model.addParameter('FBA',rxnFormKey='KmProd2',value=0.05)\n \n \n model.addParameter('GAPD',rxnFormKey='kcatF',value=442.0) \n model.addParameter('GAPD',rxnFormKey='kcatR',value=73.6) \n \n\n model.addParameter('FBA',rxnFormKey='kcatR',value=12.6)\n \n\n model.addParameter('TPI',rxnFormKey='kcatR',value=67)\n \n model.addParameter('TPI',rxnFormKey='KmSub1',value=0.077)\n model.addParameter('TPI',rxnFormKey='KmProd1',value=0.084) \n \n\n model.addParameter('FBA',rxnFormKey='kcatF',value=21.0)\n \n \n model.addParameter('PGK',rxnFormKey='kcatR',value=3.4)\n \n model.addParameter('PGM',rxnFormKey='KmSub1',value=3.6)\n model.addParameter('PGM',rxnFormKey='KmProd1',value=0.2)\n \n \n model.addParameter('PGK',rxnFormKey='KmSub1',value=0.01)\n model.addParameter('PGK',rxnFormKey='KmProd1',value=0.1)\n \n \n model.addParameter('GAPD',rxnFormKey='KmProd1',value=0.47)\n model.addParameter('GAPD',rxnFormKey='KmProd2',value=0.061)\n \n \n model.addParameter('DRPA',rxnFormKey='kcatR',value=34.0)\n \n model.addParameter('DRPA',rxnFormKey='KmProd1',value=0.267)\n 
model.addParameter('DRPA',rxnFormKey='KmProd2',value=0.2)\n\n \n model.addParameter('PPM2',rxnFormKey='kcatF',value=173)\n \n model.addParameter('PPM2',rxnFormKey='KmSub1',value=0.013)\n model.addParameter('PPM2',rxnFormKey='KmProd1',value=1.2)\n\n\n\n# print('Updated PPP Parameters')\n\n return", "def pose_pair_construct(p1,n1,p2,n2):\n v1 = p2-p1; v1 /= np.linalg.norm(v1)\n R1 = tf_construct(n1,v1)\n return RigidTransform.from_Rt(R1, p1)", "def set_ctf(ima, p):\n\tfrom utilities import generate_ctf\n\tctf = generate_ctf( p )\n\tima.set_attr( \"ctf\", ctf )", "def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang", "def gen_params(no_cultures):\n # Plate level\n kn = 0.1 # Nutrient diffusion\n ks = 0.1 # Signal diffusion\n b = 0.05 # Signal on cells effect constant\n a = 0.05 # Signal secretion constant\n # Culture level\n # Growth rate constant\n r_mean = 1.0\n r_var = 1.0\n r_params = [max(0.0, gauss(r_mean, r_var)) for i in range(no_cultures)]\n params = np.array([kn, ks, b, a] + r_params)\n return params", "def gen_reaction(tabs):\n global pbeam\n pbeam = TLorentzVector(0, 0, Ebeam, Ebeam)\n global ptarg\n ptarg = TLorentzVector(0, 0, 0, m_proton)\n pinitial = pbeam + ptarg\n global s\n s = pinitial.Mag2()\n q_in = (s - m_proton**2) / (2 * math.sqrt(s))\n q_cm = math.sqrt((s - m_proton**2 + m_omega**2)**2 / (4 * s) - m_omega**2)\n EomegaCM = math.sqrt(m_omega**2 + q_cm**2)\n EprotonCM = math.sqrt(m_proton**2 + q_cm**2)\n costhetaCM = (2 * q_in * EomegaCM - m_omega**2 - tabs) / (2 * q_in * q_cm)\n if abs(costhetaCM) > 1:\n print \"tabs =\", tabs, \"is out of range, please try another value\"\n return 0\n costheta0 = random.Uniform(-1, 1)\n phi0 = random.Uniform(-math.pi, math.pi)\n costheta1 = random.Uniform(-1, 1)\n phi1 = random.Uniform(-math.pi, math.pi)\n pomega = gen_omega(costheta0, phi0, costheta1, phi1)\n sinthetaCM = math.sqrt(1 - costhetaCM**2)\n beta = TVector3(q_cm * sinthetaCM, 0, q_cm * costhetaCM) * (1 / EomegaCM)\n pomega.Boost(beta)\n pgamma[0].Boost(beta)\n pgamma[1].Boost(beta)\n pgamma[2].Boost(beta)\n global precoil\n precoil = TLorentzVector(-q_cm * sinthetaCM, 0, -q_cm * costhetaCM, EprotonCM)\n betaCM = pinitial.Vect() * (1 / pinitial[3])\n pgamma[0].Boost(betaCM)\n pgamma[1].Boost(betaCM)\n pgamma[2].Boost(betaCM)\n pomega.Boost(betaCM)\n precoil.Boost(betaCM)\n return pomega", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, buck=None, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n if buck is None:\n self.buck_pms = []\n else:\n self.buck_pms = [] # TODO:\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, 
self.eps_sh,\n self.eps_ult, *self.buck_pms, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def new_param_energy(coords, params, topology, vecs, P=1.01, T=293.15,NPT=False,V=None,P_conv=1.e5,V_conv=1.e-6,Ener_conv=1.e-3,N_part=250.):\n\n #-------------------\n # CONSTANTS\n #-------------------\n kB = 0.0083145 #Boltzmann constant (Gas constant) in kJ/(mol*K)\n beta = 1/(kB*T)\n\n #-------------------\n # PARAMETERS\n #-------------------\n params = params\n\n mol2files = []\n for i in params:\n mol2files.append('../monomers/'+i.rsplit(' ',1)[0]+'.mol2')\n\n flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield\n mols = []\n mol = oechem.OEMol()\n for mol2file in mol2files:\n ifs = oechem.oemolistream(mol2file)\n ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)\n mol = oechem.OEGraphMol()\n while oechem.OEReadMolecule(ifs, mol):\n oechem.OETriposAtomNames(mol)\n mols.append(oechem.OEGraphMol(mol))\n K = len(params['cyclohexane'].keys())\n \n # Load forcefield file\n #ffxml = 'smirnoff99Frosst_with_AllConstraints.ffxml'#\n #print('The forcefield being used is smirnoff99Frosst_with_AllConstraints.ffxml')\n ffxml = get_data_filename('forcefield/smirnoff99Frosst.ffxml')\n print('The forcefield being used is smirnoff99Frosst.ffxml')\n\n ff = ForceField(ffxml)\n\n # Generate a topology\n top = topology#generateTopologyFromOEMol(mol)\n\n #-----------------\n # MAIN\n #-----------------\n\n # Calculate energies\n\n E_kn = np.zeros([K,len(coords)],np.float64)\n u_kn = np.zeros([K,len(coords)],np.float64)\n for i,j in enumerate(params):\n AlkEthOH_id = j\n for k,l in enumerate(params[AlkEthOH_id]):\n print(\"Anotha one\")\n for m,n in enumerate(params[AlkEthOH_id][l]):\n newparams = ff.getParameter(smirks=n[0])\n newparams[n[1]]=n[2]\n ff.setParameter(newparams,smirks=n[0])\n system = ff.createSystem(top,mols,nonbondedMethod=PME,nonbondedCutoff=1.125*nanometers,ewaldErrorTolerance=1.e-5)\n barostat = MonteCarloBarostat(P*bar, T*kelvin, 10)\n system.addForce(barostat)\n for o,p in enumerate(coords):\n e = get_energy(system,p,vecs[o])\n \n if not NPT:\n E_kn[k,o] = e._value\n u_kn[k,o] = e._value*beta\n else:\n E_kn[k,o] = e._value + P*P_conv*V[o]*V_conv*Ener_conv*N_part\n u_kn[k,o] = (e._value + P*P_conv*V[o]*V_conv*Ener_conv*N_part)*beta\n \n return E_kn,u_kn", "def __init__(self, coefficient, basefield=None):\n\n # parameter parse\n try:\n character = basefield.getCharacteristic()\n field = basefield\n except AttributeError:\n # backward compatibility\n if isinstance(basefield, int):\n field = finitefield.FinitePrimeField.getInstance(basefield)\n character = basefield\n else:\n raise ValueError(\"basefield must be FiniteField object.\")\n\n coeffs_list = []\n if isinstance(coefficient, list):\n for c in coefficient:\n if isinstance(c, int):\n coeff = field.createElement(c)\n elif c in field:\n coeff = c\n else:\n raise ValueError(\"coefficient not in basefield.\")\n coeffs_list.append(coeff)\n\n # general initialize\n ECGeneric.__init__(self, coeffs_list, field)\n\n zero = self.basefield.zero\n one = self.basefield.one\n\n # format attribute\n if self.ch == 2:\n if len(self) == 5:\n # FIXME\n if coeffs_list[0] % 2 == one and coeffs_list[2] % 2 == coeffs_list[3] % 2 == zero and coeffs_list[4]:\n self.a1 = one\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = one\n self.b4 = zero\n self.b6 = zero\n self.b8 = self.a6\n self.c4 = 
one\n self.c6 = one\n self.disc = self.a6\n self.j = self.disc.inverse()\n elif coeffs_list[0] % 2 == coeffs_list[1] % 2 == zero and coeffs_list[2]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = zero\n self.b6 = self.a3**2\n self.b8 = self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = self.a3**4\n self.j = zero\n else:\n raise ValueError(\"coefficient may be not representation of EC.\")\n else:\n raise ValueError(\"coefficient may only use full Weierstrass form for characteristic 2.\")\n elif self.ch == 3: # y^2=x^3+a2*x^2+a6 or y^2=x^3+a4*x+a6\n # FIXME\n if len(self) == 5:\n if coeffs_list[0] % 3 == coeffs_list[2] % 3 == coeffs_list[3] % 3 == 0 and coeffs_list[1] and coeffs_list[4]:\n self.a1 = zero\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = self.a2\n self.b4 = zero\n self.b6 = self.a6\n self.b8 = self.a2*self.a6\n self.c4 = self.b2**2\n self.c6 = 2*self.b2**3\n self.disc = -self.a2**3*self.a6\n self.j = (-self.a2**3)*self.a6.inverse()\n elif coeffs_list[0] == coeffs_list[1] == coeffs_list[2] == 0 and coeffs_list[3]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = 2*self.a4\n self.b6 = self.a6\n self.b8 = 2*self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = -self.a4**3\n self.j = zero\n else:\n raise ValueError(\"can't defined EC.\")\n if not self.disc:\n raise ValueError(\"this curve is singular.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n else:\n if len(self) == 5:\n self.a1 = coeffs_list[0]\n self.a2 = coeffs_list[1]\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = self.a1**2+4*self.a2\n self.b4 = self.a1*self.a3+2*self.a4\n self.b6 = self.a3**2+4*self.a6\n self.b8 = self.a1**2*self.a6+4*self.a2*self.a6-self.a1*self.a3*self.a4+self.a2*self.a3**2-self.a4**2\n self.c4 = self.b2**2-24*self.b4\n self.c6 = -self.b2**3+36*self.b2*self.b4-216*self.b6\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n elif len(self) == 2:\n self.a = coeffs_list[0]\n self.b = coeffs_list[1]\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = self.a\n self.a6 = self.b\n self.b2 = zero\n self.b4 = 2*self.a\n self.b6 = 4*self.b\n self.b8 = -(self.a**2)\n self.c4 = -48*self.a\n self.c6 = -864*self.b\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n\n self.ord = None\n self.abelian = None\n self.cubic = UniVarPolynomial({0:self.a6, 1:self.a4, 2:self.a2, 3:one},\n self.basefield)", "def generation(hid_pl, f_state, eps_z, eps_x, pd, fd):\n params_prior = fd['phi_prior'](hid_pl)\n z = sample(params_prior, eps_z, 'gauss')\n phi_z = fd['phi_z'](z)\n params_out = fd['phi_dec'](phi_z, hid_pl)\n x = sample(params_out, eps_x, pd['model'])\n\n phi_x = fd['phi_x'](x)\n f_in = tf.concat([phi_x, phi_z], axis=1, name='f_theta_joint_inputs')\n f_out, f_state = fd['f_theta'](f_in, f_state)\n return x, f_out, f_state", "def eff_param():\n return r.TF1('photon_eff_param', 
eff_param_string(), 0, 7)", "def create_weka_mfcc_13():\n global ARGS\n\n ## ten thu muc can trich chon vector dac trung (RLS, LMS, NLMS, Kalman, Non)\n name = '';\n fout = open('weka/MFCC78_TUNNING_{}_dataset.arff'.format(name), 'w')\n fout.write('@RELATION {}_dataset\\n\\n'.format(name))\n\n fout.write('@ATTRIBUTE MEAN_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD3\tREAL\\n')\n 
fout.write('@ATTRIBUTE STD_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE class \t{'+ARGS.labels+'}\\n\\n')\n \n fout.write('@DATA\\n')\n\n ## cua so\n windowing = Windowing(type='hamming',\n size=1104,\n zeroPhase=False)\n \n ## quang pho\n spectrum = Spectrum(size=1104)\n\n ##khoi tao MFCC\n mfcc = MFCC(highFrequencyBound=4000, ## gioi han tren cua tan so\n inputSize=201, \t\t\t ## kich thuoc pho dau vao\n lowFrequencyBound=0,\t ## gioi han duoi cua tan so\n numberBands=40,\t\t\t ## so luong cac dai Mels trong bo loc\n numberCoefficients=13, ## so luong dau ra cac he so Mel\n sampleRate=16000)\t\t ## tan so lay mau\n\n for label in ARGS.labels.split(','): ## duyet cac thu muc giong voi ten nhan\n\n ## dia chi thu muc\n dir = os.path.join(ARGS.dir, label)\n\n logging.info('Access folder <{}>'.format(dir))\n\n for file in sorted(os.listdir(dir)):\n\n \t## duyet cac file .wav\n if file.endswith('.wav'):\n logging.info('Process <{}>'.format(file))\n path = os.path.join(dir, file)\n \n ## doc file am thanh\n loader = MonoLoader(filename=path, sampleRate=ARGS.sampleRate)\n audio = loader()\n cnt = 0\n\n for window in FrameGenerator(audio, \n frameSize=ARGS.window_length*ARGS.sampleRate/1000, \n hopSize=ARGS.window_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n mfccs = []\n for frame in FrameGenerator(window, \n frameSize=ARGS.frame_length*ARGS.sampleRate/1000, \n hopSize=ARGS.frame_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n s = spectrum(windowing(frame))\n\n _, m = mfcc(s)\n\n m_delta = librosa.feature.delta(m, order=1) ## dao ham bac 1\n m_delta_delta = librosa.feature.delta(m, order=2) ## dao ham bac 2\n\n m_all = np.concatenate((m, m_delta, m_delta_delta), axis=0) ## them vao chuoi\n mfccs.append(m_all)\n mfccs = np.array(mfccs)\n mfccs_mean = np.mean(mfccs, axis=0)\n mfccs_std = np.std(mfccs, axis=0)\n feat = np.concatenate((mfccs_mean, mfccs_std), axis=0).tolist()\n str_feat = [str(x) for x in feat]\n line = ','.join(str_feat)+','+label\n fout.write(line+'\\n')\n cnt = cnt+1\n logging.info('{} samples'.format(cnt))", "def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? 
Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. #\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the 
covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for 
computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. \r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def C_factory(P, n=2, V_type=\"clamped\"):\n\n # TODO: check that p_len is ok with the degree and > 0\n m = len(P) # the number of points in P\n D = len(P[0]) # the dimension of a point (2D, 3D)\n\n # Create the knot vector\n V = make_knot_vector(n, m, V_type)\n # TODO: check the validity of the input knot vector.\n # TODO: create an initial Vector Point.\n\n #############################################################################\n # The following line will be detailed later. #\n # We create the highest degree basis spline function, aka. our entry point. #\n # Using the recursive formulation of b-splines, this b_n will call #\n # lower degree basis_functions. b_n is a function. 
#\n #############################################################################\n b_n = basis_factory(n)\n\n @memoize\n def S(t, d):\n \"\"\" The b-spline funtion, as defined in eq. 3. \"\"\"\n out = 0.\n for i in range(m): #: Iterate over 0-indexed point indices\n out += P[i][d]*b_n(t, i, V)\n return out\n\n def C(t):\n \"\"\" The b-spline curve, as defined in eq. 4. \"\"\"\n out = [0.]*D #: For each t we return a list of D coordinates\n for d in range(D): #: Iterate over 0-indexed dimension indices\n out[d] = S(t,d)\n return out\n\n C.P = P #: The control polygone\n C.V = V #: The knot vector used by the function\n C.spline = S #: The spline function.\n C.basis = b_n #: The highest degree basis function. Useful to do some plotting.\n C.min = V[0] #: The domain of definition of the function, lower bound for t\n C.max = V[-1] #: The domain of definition of the function, upper bound for t\n C.endpoint = C.max!=V[-1] #: Is the upper bound included in the domain.\n return C", "def metamer(p):\r\n return Components(p, Scale=3)", "def generate_parameters(nid):\n G = EcGroup(nid)\n g = G.hash_to_point(b\"g\")\n o = G.order()\n return (g, o)", "def make_features(targs_pb, pf):\n camera, to_uvd, to_world, keys_uvd, _, visible, _ = utils.get_contents_pb(\n targs_pb.kp_target)\n num_kp = len(keys_uvd)\n # Restrict to max projection targets\n proj_targs = [\n utils.get_contents_pb(targ_pb) for targ_pb in targs_pb.proj_targets\n ][:utils.MAX_TARGET_FRAMES]\n targets_keys_uvd = []\n targets_to_uvd = []\n for proj_targ in proj_targs:\n _, to_uvd, _, keys_uvd, _, _, _ = proj_targ\n targets_keys_uvd.append(keys_uvd)\n targets_to_uvd.append(to_uvd)\n # Add dummy targets if necessary.\n num_targets = len(proj_targs)\n for _ in range(utils.MAX_TARGET_FRAMES - num_targets):\n targets_keys_uvd.append(utils.dummy_keys_uvd(num_kp))\n targets_to_uvd.append(utils.dummy_to_uvd())\n\n def feat_int(num):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[num]))\n\n def feat_floats(floats):\n return tf.train.Feature(float_list=tf.train.FloatList(value=floats))\n\n feats = {\n 'to_world_' + pf:\n feat_floats(to_world.flatten()),\n 'to_uvd_' + pf:\n feat_floats(to_uvd.flatten()),\n 'camera_' + pf:\n feat_floats(utils.cam_pb_to_array(camera)),\n 'keys_uvd_' + pf:\n feat_floats(np.array(keys_uvd).flatten()),\n 'visible_' + pf:\n feat_floats(visible),\n 'num_kp_' + pf:\n feat_int(num_kp),\n 'num_targets_' + pf:\n feat_int(num_targets),\n 'targets_to_uvd_' + pf:\n feat_floats(np.array(targets_to_uvd).flatten()),\n 'targets_keys_uvd_' + pf:\n feat_floats(np.array(targets_keys_uvd).flatten()),\n 'mirrored':\n feat_int(int(targs_pb.mirrored)),\n }\n return feats", "def gen_parameter(self, g, ng, p):\n pass", "def get_m_eng_body(f_eng_body, P):\n m = np.zeros(3)\n for i in range(0, P.eng_nb):\n m += np.cross(P.eng_pos[i], f_eng_body[i])\n return m", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, lsr, alpha=1.0, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.lsr = float(lsr)\n self.alpha = float(alpha)\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = 
self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh, self.eps_ult, '-DMBuck', self.lsr, self.alpha, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def rpfp(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['rpfp']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"RPFP{0}\".format(str(i))\n distillate_label = \"L{0}-E_C{1}\".format(str(i),str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n lMag_label = 'C{0}MAG'.format(str(i))\n cMag_label = 'C{0}MAG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label, lMag_label, cMag_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n dep_lMag_label = lMag_label\n dep_lMag_name = fields['deps'][2]\n dep_lMag_uuid = self.uuid_map[lMag_label]\n dep_cMag_label = cMag_label\n dep_cMag_name = fields['deps'][3]\n dep_cMag_uuid = self.uuid_map[cMag_label]\n \n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_lMag_label, dep_lMag_name, dep_lMag_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid],\n [dep_cMag_label, dep_cMag_name, dep_cMag_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"RPFP\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map['REAC_PWR{0}'.format(i)] = emitted[-3][-36:]\n output_uuid_map['FUND_PWR{0}'.format(i)] = emitted[-2][-36:]\n\n filename = \"{0}/RPFP_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def PyC_Punto(P):\n C = CPunto()\n\n C.x[0] = P['x'][0]\n C.x[1] = P['x'][1]\n C.x[2] = P['x'][2]\n \n C.y[0] = P['y'][0]\n C.y[1] = P['y'][1]\n C.y[2] = P['y'][2]\n\n C.D = P['D']\n \n C.n = P['n']\n C.a, C.b, C.c = P['a'], P['b'], P['c']\n C.d, C.e, C.f = P['d'], P['e'], P['f']\n\n return C", "def 
doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def test_create(self):\n f = azplugins.restrain.plane(group=hoomd.group.all(), point=(0,0,0), normal=(1,0,0), k=2.0)\n\n f.set_params(k=5.0)\n f.set_params(k=8)\n\n f.set_params(point=(0,0,1))\n f.set_params(point=[0,0,1])\n f.set_params(point=np.array([0,0,1]))\n\n f.set_params(normal=(0,0,1))\n f.set_params(normal=[0,0,1])\n f.set_params(normal=np.array([0,0,1]))\n\n f.set_params(point=(0,0,0), normal=(1,0,0), k=10.0)", "def tcs(self,lpf=0, opf=1):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n \n # store initial beam data\n self.LD[0][0] = S.pos\n\n #Mean data\n self.LD[0][1] = S.moment0_env[0]\n self.LD[0][2] = S.moment0_env[2]\n self.LD[0][3] = S.moment0_env[4]\n self.LD[0][4] = S.moment0_rms[0]\n self.LD[0][5] = S.moment0_rms[2]\n self.LD[0][6] = S.moment0_rms[4]\n self.LD[0][7] = S.ref_phis\n self.LD[0][8] = S.ref_IonEk\n\n # store initial beam data\n self.LD2[0][0] = S.pos\n #Mean data\n self.LD2[0][1] = S.moment0_env[1]\n self.LD2[0][2] = S.moment0_env[3]\n self.LD2[0][3] = S.moment0_env[5]\n self.LD2[0][4] = S.moment0_rms[1]\n self.LD2[0][5] = S.moment0_rms[3]\n self.LD2[0][6] = S.moment0_rms[5]\n\n\n # propagate step by step and store beam data\n for i in range(1,len(self.M)):\n self.M.propagate(S, i, 1)\n \n \n self.LD[i][0] = S.pos\n #Mean data\n self.LD[i][1] = S.moment0_env[0]\n self.LD[i][2] = S.moment0_env[2]\n self.LD[i][3] = S.moment0_env[4]\n self.LD[i][4] = S.moment0_rms[0]\n self.LD[i][5] = S.moment0_rms[2]\n self.LD[i][6] = S.moment0_rms[4]\n self.LD[i][7] = S.ref_phis\n self.LD[i][8] = S.ref_IonEk\n\n self.LD2[i][0] = S.pos\n #Mean data\n self.LD2[i][1] = S.moment0_env[1]\n self.LD2[i][2] = S.moment0_env[3]\n self.LD2[i][3] = S.moment0_env[5]\n self.LD2[i][4] = S.moment0_rms[1]\n self.LD2[i][5] = S.moment0_rms[3]\n self.LD2[i][6] = 
S.moment0_rms[5]\n\n #output data for plotting\n if opf: np.savetxt('ldata.txt',self.LD)\n\n if not lpf: return S", "def derived_parameters(cls):\n return ['cgg', 'cdd', 'css', 'cbb', 'vstar', 'gain', 'ft']", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def ppl_convert(P):\n if isinstance(P, ppl.C_Polyhedron):\n return P\n gs = ppl.Generator_System()\n for v in P.vertices_list():\n gs.insert(ppl.point(sum(int(j) * ppl.Variable(i) for i,j in enumerate(v))))\n for r in P.rays_list():\n gs.insert(ppl.ray(sum(int(j) * ppl.Variable(i) for i,j in enumerate(r))))\n for l in P.lines_list():\n gs.insert(ppl.line(sum(int(j) * ppl.Variable(i) for i,j in enumerate(l))))\n return ppl.C_Polyhedron(gs)", "def pco2_maker(file):\n\t# Reads text file\n\tlines = file.readlines()\n\t# Maps value\n\tkeys = [key.strip(' ').rstrip() for key in lines[0].split(\"\\t\")]\n\tvalues = [value.strip(' ').rstrip() for value in lines[1].split(\"\\t\")]\n\n\tdata = dict(zip(keys, values))\n\t# Set up CSV writing\n\twr = csv.writer(open(\"PCO2W_Cal_File.csv\", \"w\", newline=''))\n\n\tequivalent = {\"CC_cala\" : \"SAMI_A\", \"CC_calb\" : \"SAMI_B\", \"CC_calc\" : \"SAMI_C\", \"CC_calt\" : \"AvgT\"} \n\tconstants = {\"CC_ea434\": 19706, \"CC_ea620\": 34, \"CC_eb434\": 3073, \"CC_eb620\": 44327}\n\tcsv_format = [\"CC_cala\", \"CC_calb\", \"CC_calc\", \"CC_calt\", \"CC_ea434\", \"CC_ea620\", \"CC_eb434\", \"CC_eb620\"]\n\tcsv_maker(wr, csv_format, data, constants, equivalent)", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def _tf2_ ( self , *args ) :\n ##\n if not hasattr ( self , '_wo2' ) : self._wo2 = _WO2_ ( self )\n if not self._wo2 : self._wo2 = _WO2_ ( self )\n ## \n _wo = self._wo2\n fun = ROOT.TF2 ( funID () , _wo , *args )\n fun.SetNpx ( 100 ) \n fun.SetNpy ( 100 ) \n #\n return fun", "def CPy_Punto(C, P):\n \n P['x'][0] = C.x[0]\n P['x'][1] = C.x[1]\n P['x'][2] = C.x[2]\n\n P['y'][0] = C.y[0]\n P['y'][1] = C.y[1]\n P['y'][2] = C.y[2]\n\n P['D'] = C.D\n P['a'], P['b'], P['c'] = C.a, C.b, C.c\n P['d'], P['e'], P['f'] = C.d, C.e, C.f", "def 
doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def main(codelabel):\n try:\n code = Code.get_from_string(codelabel)\n except NotExistent:\n print(\"The code '{}' does not exist\".format(codelabel))\n sys.exit(1)\n\n print(\"Testing CP2K ENERGY on H2 (DFT) without StructureData...\")\n\n # parameters\n parameters = Dict(\n dict={\n 'FORCE_EVAL': {\n 'METHOD': 'Quickstep',\n 'DFT': {\n 'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',\n 'QS': {\n 'EPS_DEFAULT': 1.0e-12,\n 'WF_INTERPOLATION': 'ps',\n 'EXTRAPOLATION_ORDER': 3,\n },\n 'MGRID': {\n 'NGRIDS': 4,\n 'CUTOFF': 280,\n 'REL_CUTOFF': 30,\n },\n 'XC': {\n 'XC_FUNCTIONAL': {\n '_': 'LDA',\n },\n },\n 'POISSON': {\n 'PERIODIC': 'none',\n 'PSOLVER': 'MT',\n },\n },\n 'SUBSYS': {\n # structure directly included in parameters\n 'CELL': {\n 'ABC': '4.0 4.0 4.75'\n },\n 'COORD': {\n ' ': ['H 2.0 2.0 2.737166', 'H 2.0 2.0 2.000000']\n },\n 'KIND': [\n {\n '_': 'O',\n 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',\n 'POTENTIAL': 'GTH-LDA-q6'\n },\n {\n '_': 'H',\n 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',\n 'POTENTIAL': 'GTH-LDA-q1'\n },\n ],\n },\n }\n })\n\n # resources\n options = {\n \"resources\": {\n \"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 1,\n },\n \"max_wallclock_seconds\": 1 * 3 * 60,\n }\n\n inputs = {'parameters': parameters, 'code': code, 'metadata': {'options': options,}}\n\n print(\"submitted calculation...\")\n calc = run(Cp2kCalculation, **inputs)\n\n # check energy\n expected_energy = -1.14005678487\n if abs(calc['output_parameters'].dict.energy - expected_energy) < 1e-10:\n print(\"OK, energy has the expected value\")\n else:\n print(\"ERROR!\")\n print(\"Expected energy value: {}\".format(expected_energy))\n print(\"Actual energy value: {}\".format(calc['output_parameters'].dict.energy))\n sys.exit(3)\n\n sys.exit(0)", "def CPFGenerator(amount=1,cpfn=None):\n\n # randnumbers are created from 0 to 9, and multiplicated by these list for\n # fist digit\n d1weight = list(range(2,11)) # [2,3,...,10]\n d1weight.reverse()\n\n # for second digit same as for first digit, but with d1\n d2weight = list(range(2,12)) # [2,3,...,11]\n d2weight.reverse()\n\n # create how many cpfs amount says then add to set cpfs \n cpfs=set()\n\n while len(cpfs) < amount:\n # get some rand numbers\n if not cpfn:\n randns = [randint(0,9) for x in range(9)]\n else:\n randns = cpfn\n\n d1,d2 = get_digits(randns,d1weight,d2weight)\n\n # transform cpf in a string\n cpf = (\"%s\"*11) % tuple(randns+[d1,d2])\n\n # if not exist, add in cpfs\n if not cpf in cpfs:\n cpfs.add(cpf)\n\n cpfs = list(cpfs)\n if len(cpfs) != 1:\n return cpfs\n else:\n return 
cpfs[0]", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def __init__(self, filename, num_particles, max_iteration, maxFlip, maxTabuSize, w, c1, c2):\n #Read cnf formula from file\n self.clauses, self.num_literals, self.num_clauses = self.w_clauses_from_file(filename)\n\n #Parameters of PSO\n self.num_particles = num_particles\n self.max_iteration = max_iteration\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.max_flip = maxFlip\n\n #Tabu list parameters\n self.tabuList = []\n self.maxTabuSize = maxTabuSize\n\n #Initialize particles\n self.swarm = self.init_particles(self.num_particles, self.num_literals)\n\n #Initialize global best and it's fitness\n self.global_best = self.swarm[0].position\n self.global_best_fitness = self.fitness(self.global_best)", "def genus(P, E, F, V=None):\n\n return euler_characteristic(P, E, F, V)-2", "def __init__(self, name, params):\n # create generic technology object\n DER.__init__(self, params['name'], 'ICE', params)\n # input params UNITS ARE COMMENTED TO THE RIGHT\n self.rated_power = params['rated_power'] # kW/generator\n self.p_min = params['min_power'] # kW/generator\n self.startup_time = params['startup_time'] # default value of 0, in units of minutes\n self.efficiency = params['efficiency'] # gal/kWh\n self.fuel_cost = params['fuel_cost'] # $/gal\n self.vari_om = params['variable_om_cost'] # $/kwh\n self.fixed_om = params['fixed_om_cost'] # $/yr\n self.capital_cost = params['ccost'] # $/generator\n self.ccost_kw = params['ccost_kW']\n\n self.variable_names = {'ice_gen', 'on_ice'}\n try:\n self.n = params['n'] # generators\n self.capex = self.capital_cost * self.n + self.ccost_kw * self.rated_power * self.n\n except KeyError:\n pass", "def create_model():\n # Get list of all syllables: [\"<s>\", \"AH\", \"</s>\", \"<s>\", \"T\", ...]\n syllabifier = Syllabifier()\n all_syllables = syllabifier.all_syllables()\n\n # Count conditional probabilties of phoneme tuples\n tcf = TrigramCollocationFinder.from_words(all_syllables)\n bcf = BigramCollocationFinder.from_words(all_syllables)\n tri_dict = dict(sorted(tcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n bi_dict = dict(sorted(bcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n\n # Create dictionary to count cond prob all phoneme tuples\n accepted_phonemes = [i[0] for i in cmudict.phones()]\n accepted_phonemes.append('<s>')\n accepted_phonemes.append('</s>')\n phoneme_tups = [p for p in itertools.product(accepted_phonemes, repeat=3)]\n 
cond_probs_dict = dict([(char, 0) for char in phoneme_tups])\n\n for t in tri_dict:\n p1, p2, p3 = t[0], t[1], t[2]\n tri_count = tri_dict[t]\n bi_count = bi_dict[(p1, p2)]\n if bi_count > 1:\n cond_prob = tri_count * 1.0 / bi_count\n else:\n cond_prob = 0.0\n cond_probs_dict[(p1, p2, p3)] = cond_prob\n\n pickle.dump(cond_probs_dict, open(COND_PROBS_PATH, \"wb\"))\n return", "def __init__(self, P: int, M: int, dtype: type = np.complex128):\n # Check P\n try:\n P = int(P)\n except ValueError:\n print(\"P must be a number.\")\n # Check M\n try:\n M = int(M)\n except ValueError:\n print(\"M must be a number.\")\n self.P = P\n self.M = M\n self.N = 2 * M + 1\n self.__offsets = -(np.arange(1, self.N + 1) - 1 - self.P)\n\n # Init from super class\n shape = ((self.N - self.P) * (self.P + 1), self.N)\n super(ToeplitzificationOperator, self).__init__(shape=shape, dtype=dtype)\n\n self.norm = np.sqrt(self.P + 1)\n self.gram = toep_gram(self.P, self.N)", "def config_params1(parameter):\n\n p = parameter['p']\n q = parameter['q']\n d = parameter['d']\n m = parameter['m']\n pdq_m = list(itertools.product(p, d, q,m)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def __init__(self, p=0.5):\n assert 0. <= p <= 1.\n self.p = p\n self.rng = T.shared_randomstreams.RandomStreams(seed=123456)\n self.params = []", "def PowerCurveParametricExample1():\n # --- Parameters for this script\n ref_dir = 'NREL5MW/' # Folder where the fast input files are located (will be copied)\n work_dir = 'NREL5MW_ParametricPowerCurve1/' # Output folder (will be created)\n main_file = 'Main_Onshore_OF2.fst' # Main file in ref_dir, used as a template\n FAST_EXE = 'NREL5MW/OpenFAST2_x64s_ebra.exe' # Location of a FAST exe (and dll)\n\n # --- Defining the parametric study (list of dictionnaries with keys as FAST parameters)\n WS = [3,5,7,9 ,11,13,15]\n RPM = [5,6,7,10,10,10,10] # initial conditions\n PITCH = [0,0,0,0 ,5 ,10,15] # initial conditions\n BaseDict = {'FAST|TMax': 100, 'FAST|DT': 0.01, 'FAST|DT_Out': 0.1}\n #BaseDict = fastlib.paramsNoController(BaseDict)\n #BaseDict = fastlib.paramsStiff(BaseDict)\n #BaseDict = fastlib.paramsNoGen(BaseDict)\n PARAMS=[]\n for wsp,rpm,pitch in zip(WS,RPM,PITCH): # NOTE: same length of WS and RPM otherwise do multiple for loops\n p=BaseDict.copy()\n p['EDFile|RotSpeed'] = rpm\n p['EDFile|BlPitch(1)'] = pitch\n p['EDFile|BlPitch(2)'] = pitch\n p['EDFile|BlPitch(3)'] = pitch\n p['InflowFile|HWindSpeed'] = wsp\n p['InflowFile|WindType'] = 1 # Setting steady wind\n PARAMS.append(p)\n # --- Defining a function to name the files based on the parameters\n def naming(p):\n return 'ws{:04.1f}'.format(p['InflowFile|HWindSpeed'])\n\n # --- Generating all files in a workdir\n fastfiles=fastlib.templateReplace(PARAMS,ref_dir,work_dir,name_function=naming,RemoveRefSubFiles=True,main_file=main_file)\n print(fastfiles)\n\n # --- Creating a batch script just in case\n fastlib.writeBatch(os.path.join(work_dir,'_RUN_ALL.bat'), fastfiles,fastExe=FAST_EXE)\n # --- Running the simulations\n fastlib.run_fastfiles(fastfiles,fastExe=FAST_EXE,parallel=True,ShowOutputs=False,nCores=2)\n\n # --- Simple Postprocessing\n outFiles = [os.path.splitext(f)[0]+'.outb' for f in fastfiles]\n\n avg_results = fastlib.averagePostPro(outFiles,avgMethod='constantwindow',avgParam=10, ColMap = {'WS_[m/s]':'Wind1VelX_[m/s]'},ColSort='WS_[m/s]')\n print(avg_results)\n avg_results.to_csv('PowerCurve1.csv',sep='\\t',index=False)", "def 
build_mlp(input_, config):\n current_input = input_\n print(current_input)\n for i in range(len(config['fcc_layers']) - 1):\n current_input = tf.keras.layers.Dense(\n units=config['fcc_layers'][i], activation='tanh',\n name='fcc_layer_{}'.format(i + 1))(current_input)\n current_input = tf.keras.layers.Dropout(\n rate=config['dropout'], name='dropout_{}'.format(i + 1))(current_input)\n cascade_embedding_layer = tf.keras.layers.Dense(\n units=config['fcc_layers'][-1], activation='tanh',\n name='cascade_embedding_layer')(current_input)\n cascade_embedding_layer_do = tf.keras.layers.Dropout(\n rate=config['dropout'],\n name='cascade_embedding_dropout')(cascade_embedding_layer)\n prediction_layer = tf.keras.layers.Dense(\n units=1, activation='sigmoid',\n name='prediction_layer')(cascade_embedding_layer_do)\n return cascade_embedding_layer, prediction_layer", "def create_CGfiles_using_martinizepy(Ctermini_type, set_charge, name):\n\n os.system('cp %s/%s ./'%(this_path,martini_itp))\n\n os.system('python2 %s/martinize.py -f %s_aa.pdb \\\n -o %s.top -x %s.pdb -name %s -ff martini22 \\\n -nt \\\n -ss CCCCCCCCCCCC '%(this_path,name,name,name,name))\n\n\n # Collect lines defining atoms\n lines_atoms = []\n break1,break2 = None,None\n with open('%s.itp'%name, 'r') as f:\n data = f.readlines()\n start = False\n for i,line in enumerate(data):\n if '[ atoms ]' in line:\n start = True\n break1 = i+1\n continue\n if start:\n if line.split()==[]:\n start = False\n break2 = i\n break\n lines_atoms = lines_atoms + [line]\n \n \n\n # Modify lines_atoms as per Ctermini\n charged_thusfar = 0\n if Ctermini_type.upper() == 'OH':\n for i in range(len(lines_atoms))[::-1]:\n if 'BB' in lines_atoms[i]:\n lines_atoms[i] = lines_atoms[i].replace(' 0.0', '-1.0')\n lines_atoms[i] = lines_atoms[i].replace('P5', 'Qa') \n charged_thusfar += -1\n break\n\n\n # modify charge of side chains,\n # CURRENTLY only neutralizes if Qd SC is found (deprotonation)\n neutralize_ahead = False\n if set_charge < 0: # deprotonation\n for i in range(len(lines_atoms))[::-1]:\n if charged_thusfar == set_charge:\n neutralize_ahead = True\n \n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n if neutralize_ahead:\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n else:\n charged_thusfar += -1\n\n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n lines_atoms[i] = lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qd', 'P1')\n\n if charged_thusfar != set_charge:\n raise ValueError('Peptide sequence could not be used to achieve set_charge')\n\n elif set_charge == 0: # protonation-deprotonation\n if Ctermini_type == 'OH':\n raise ValueError('Protonation after deprotonation does not make sense')\n \n for i in range(len(lines_atoms))[::-1]:\n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n\n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n lines_atoms[i] = lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = 
lines_atoms[i].replace('Qd', 'P1')\n \n elif set_charge > 0: # protonation\n if Ctermini_type == 'OH':\n raise ValueError('Protonation after deprotonation does not make sense')\n\n for i in range(len(lines_atoms))[::-1]:\n if charged_thusfar == set_charge:\n neutralize_ahead = True\n\n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n \n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n if neutralize_ahead:\n lines_atoms[i] = lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qd', 'P1')\n else:\n charged_thusfar += 1\n \n if charged_thusfar != set_charge:\n raise ValueError('Peptide sequence could not be used to achieve set_charge')\n\n\n data_new = ''\n for line in data[:break1]:\n data_new += line\n for line in lines_atoms:\n data_new += line\n for line in data[break2:]:\n data_new += line\n \n \n with open('%s.itp'%name, 'w') as f:\n f.write(data_new)", "def __init__(self, p, hyperpara, para, inst_name=\"\"):\n \n self.hyperpara = hyperpara\n self.para = para\n \n s = -np.log(-np.log(1.0 - p))\n \n k, cop = hyperpara\n \n # Sets colour\n colour = [\n None,\n None,\n {\n \"i\": 2,\n \"me\": 3},\n {\n \"i\": 0,\n \"me\": 1}][k][cop]\n \n if cop == \"i\":\n if k == 3:\n def f(X):\n q1, q2, q3 = X\n \n Y = np.array([q1, q2 - q1, q3 - q2])\n \n if np.any(Y <= 0.0):\n return None\n \n return -0.5 * np.sum(((Y - para[:, 0]) / para[:, 1]) ** 2)\n elif k == 2:\n def f(X):\n sigma, q1, q2 = X\n \n if sigma <= 0:\n return None\n \n Y = np.array([q1, q2 - q1])\n \n if np.any(Y <= 0.0):\n return None\n \n a = -0.5 * np.sum(((Y - para[:, 0]) / para[:, 1]) ** 2)\n \n return a - np.log(sigma)\n elif cop == \"me\":\n q_marg = [None for _ in range(k)]\n \n for i in range(k):\n dist = Normal(para[i, 0], para[i, 1])\n q_marg[i] = TruncatedDistribution(\n dist,\n 0.0,\n TruncatedDistribution.LOWER)\n \n ot = MaximumEntropyOrderStatisticsDistribution(q_marg)\n \n if k == 3:\n def f(X):\n q1, q2, q3 = X\n \n if np.any(np.array([q1, q2 - q1, q3 - q2]) <= 0.0):\n return None\n \n Y = ot.computePDF(X)\n \n if Y <= 0:\n return None\n \n return np.log(Y)\n elif k == 2:\n def f(X):\n sigma, q1, q2 = X\n \n if sigma <= 0 or q1 <= 0.0 or q2 <= q1:\n return None\n \n Y = ot.computePDF([q1, q2])\n \n if Y <= 0:\n return None\n \n return np.log(Y) - np.log(sigma)\n \n if k == 3:\n # Transformation (mu, theta, xi) -> (q1, q2, q3)\n def g(X):\n mu, sigma, xi = X\n \n if sigma <= 0:\n return None\n \n # When xi is close enough to 0, we consider it equal to 0\n if abs(xi) < 1e-300:\n q = mu + sigma * s\n else:\n q = mu + sigma * (np.exp(xi * s) - 1.0) / xi\n \n if q[0] < 0.0:\n return None\n return q\n \n \n # Log of determinant of g\n def g_det(X):\n mu, sigma, xi = X\n \n if abs(xi) < 1e-300:\n return np.log(sigma)\n \n e = np.exp(s * xi)\n \n sm = [\n s[i] * e[i] * (e[(i + 2) % 3] - e[(i + 1) % 3])\n for i in range(3)]\n \n return np.log(sigma) + np.log(sum(sm)) - np.log(xi ** 2.0)\n elif k == 2:\n # Transformation (mu, sigma, xi) -> (sigma, q1, q2)\n def g(X):\n mu, sigma, xi = X\n \n # When xi is close enough to 0, we consider it equal to 0\n if abs(xi) < 1e-300:\n q = mu + sigma * s\n else:\n q = mu + sigma * (np.exp(xi * s) - 1.0) / xi\n \n if q[0] < 0.0:\n return None\n 
\n return np.concatenate(([sigma], q))\n \n \n # Log of determinant of g\n def g_det(X):\n mu, sigma, xi = X\n \n if abs(xi) < 1e-300:\n return np.log(sigma)\n \n e = (s * xi - 1.0) * np.exp(s * xi)\n \n f = np.log(abs(e[0] - e[1]))\n \n return np.log(sigma) + f - np.log(xi ** 2.0)\n \n super().__init__(\n util.log_transform(f, g, g_det),\n colour=colour,\n inst_name=inst_name)\n \n if k == 2:\n self.prior[\"proper\"] = False", "def _make_mcnp_input(self):\n # Create the problem description\n lines = ['Point source in infinite geometry']\n\n # Create the cell cards: material 1 inside sphere, void outside\n lines.append('c --- Cell cards ---')\n if self._temperature is not None:\n kT = self._temperature * K_BOLTZMANN * 1e-6\n lines.append(f'1 1 -{self.density} -1 imp:n=1 tmp={kT}')\n else:\n lines.append(f'1 1 -{self.density} -1 imp:n=1')\n lines.append('2 0 1 imp:n=0')\n lines.append('')\n\n # Create the surface cards: box centered on origin with 2e9 cm sides`\n # and reflective boundary conditions\n lines.append('c --- Surface cards ---')\n lines.append('*1 rpp -1.e9 1e9 -1.e9 1.e9 -1.e9 1.e9')\n lines.append('')\n\n # Create the data cards\n lines.append('c --- Data cards ---')\n\n # Materials\n if re.match('(71[0-6]nc)', self.suffix):\n name = szax(self.nuclide, self.suffix)\n else:\n name = zaid(self.nuclide, self.suffix)\n lines.append(f'm1 {name} 1.0')\n if self.thermal is not None:\n lines.append(f'mt1 {self.thermal}')\n lines.append('nonu 2')\n\n # Physics: neutron transport\n lines.append('mode n')\n\n # Source definition: isotropic point source at center of sphere\n energy = self.energy * 1e-6\n lines.append(f'sdef cel=1 erg={energy}')\n\n # Tallies: neutron flux over cell\n lines.append('f4:n 1')\n min_energy = self._min_energy * 1e-6\n lines.append(f'e4 {min_energy} {self._bins-1}ilog {1.0001*energy}')\n\n # Problem termination: number of particles to transport\n lines.append(f'nps {self.particles}')\n\n # Write the problem\n with open(self.other_dir / 'inp', 'w') as f:\n f.write('\\n'.join(lines))", "def _MoeLayerParams(ff_p):\n assert issubclass(ff_p.cls,\n layers_with_attention.TransformerFeedForwardLayer)\n assert p.num_experts > 0\n moe_p = p.moe_layer_tpl.Copy()\n # Copy over the base params.\n base_layer.BaseLayer.CopyBaseParams(ff_p, moe_p)\n # Set other params.\n moe_p.name = ff_p.name\n moe_p.input_dim = ff_p.input_dim\n moe_p.output_dim = ff_p.output_dim\n moe_p.hidden_dim = ff_p.hidden_dim\n moe_p.activation = ff_p.activation\n moe_p.residual_dropout_prob = ff_p.residual_dropout_prob\n moe_p.relu_dropout_prob = ff_p.relu_dropout_prob\n moe_p.dropout_tpl = ff_p.residual_dropout_tpl.Copy()\n moe_p.num_groups = p.num_groups\n moe_p.min_group_size = p.min_group_size\n moe_p.num_experts = p.num_experts\n # weight_split_dims_mapping and activation_split_dims_mapping should have\n # been set through p.moe_layer_tpl params.\n return moe_p", "def main(model,pmap):\n\n addPppParams(model)\n\n# addTransportParams(model,pmap)\n\n #translationSources(model)\n\n #addLipidMetabs(model)\n\n return", "def create_p3_feature(C3, C4, C5, feature_size=256):\n P5 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C5_reduced')(C5)\n P5_upsampled = layers.UpsampleLike(name='P5_upsampled')([P5, C4])\n\n # add P5 elementwise to C4\n P4 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced')(C4)\n P4 = keras.layers.Add(name='P4_merged')([P5_upsampled, P4])\n P4_upsampled = layers.UpsampleLike(name='P4_upsampled')([P4, 
C3])\n\n # add P4 elementwise to C3\n P3 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced')(C3)\n P3 = keras.layers.Add(name='P3_merged')([P4_upsampled, P3])\n P3 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P3')(P3)\n\n return P3", "def build(c):", "def create(pdef):\n from sklearn.pipeline import Pipeline\n return [Pipeline(p) for p in pdef]", "def ml_params(tp_vcf, fp_vcf, metrics, format_metrics):\n metrics = ['Entropy', 'FS', 'MFE',\n 'MQ', 'NBQ', 'ReadPosEndDist']\n exploring = False\n with open(tp_vcf) as in_handle:\n df_tp = read_vcf_metrics(in_handle, metrics, format_metrics, 1,\n exploring)\n with open(fp_vcf) as in_handle:\n df_fp = read_vcf_metrics(in_handle, metrics, format_metrics, -1,\n exploring)\n df = pandas.concat([df_tp, df_fp], keys=[\"tp\", \"fp\"])\n df = df.fillna({\"NBQ\": df[\"NBQ\"].mean(), \"PL\" : df[\"PL\"].mean(),\n \"AD\" : df[\"AD\"].mean(), \"FS\": 0.0, \"DP\": df[\"DP\"].mean()})\n df = normalize_inputs(df, metrics + format_metrics)\n for val, name in [(0, \"snp\"), (1, \"indel\")]:\n print \"--->\", name\n linear_metric_explore(df[df[\"indel\"] == val], metrics + format_metrics)\n #ml_param_explore(df[df[\"indel\"] == val], metrics + format_metrics,\n # exploring)", "def eff_param_string():\n return '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'", "def config_params0(data,parameter):\n model = []\n #Range of value of p\n acf = sm.graphics.tsa.acf(data.diff().dropna())\n for i in range(len(acf)):\n acf[i] = abs(acf[i]*10)\n if (ceil(acf[i])) <= 2:\n p = range(ceil(acf[i])-1,ceil(acf[i])+2)\n break\n\n #range of value of q\n pacf = sm.graphics.tsa.pacf(data.diff().dropna())\n for i in range(len(pacf)):\n pacf[i] = abs(pacf[i]*10)\n if (ceil(pacf[i])) <= 2:\n q = range(ceil(pacf[i])-1,ceil(pacf[i])+2)\n break\n\n\t# define config lists\n p_params = p\n d_params = parameter['d']\n q_params = q\n m_params = parameter['m']\n #P_params = p\n #D_params = [0, 1]\n #Q_params = q\n \n pdq_m = list(itertools.product(p_params, d_params, q_params,m_params)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def mo_parse_p(self, filepath):\n\n # Now, can reprocess using tesseract-ocr rather than pdftotext\n ptext = textract.process(filepath, method='tesseract', encoding='utf-8')\n ptext = ptext.replace(b'\\xe2\\x80\\x94', b'-')\n ptext = ptext.decode('utf-8')\n keys = list(self.mo_coefficient_name_map.keys())\n\n # Get the calibration date:\n for line in ptext.splitlines():\n if 'CALIBRATION DATE' in line:\n items = line.split()\n ind = items.index('DATE:')\n cal_date = items[ind+1]\n cal_date = pd.to_datetime(cal_date).strftime('%Y%m%d')\n self.date.update({len(self.date): cal_date})\n\n if 'psia S/N' in line:\n items = line.split()\n ind = items.index('psia')\n prange = items[ind-1]\n name = self.mo_coefficient_name_map.get('prange')\n self.coefficients.update({name: prange})\n\n # Loop through each line looking for the lines which contain\n # calibration coefficients\n if '=' in line:\n # Tesseract-ocr misreads '0' as O, and 1 as IL\n line = line.replace('O', '0').replace('IL', '1').replace(\n '=', '').replace(',.', '.').replace(',', '.')\n line = line.replace('L', '1').replace('@', '0').replace('l', '1').replace('--', '-')\n if '11' in line and 'PA2' not in line:\n line = line.replace('11', '1')\n items = 
line.split()\n for n, k in enumerate(items):\n if k.lower() in keys:\n try:\n float(items[n+1])\n name = self.mo_coefficient_name_map.get(k.lower())\n self.coefficients.update({name: items[n+1]})\n except:\n pass\n if 'CC_ptcb2' not in list(self.mo_coefficient_name_map.keys()):\n self.coefficients.update({'CC_ptcb2': '0.000000e+000'})", "def _construct_kld_costs(self, p=1.0):\n kld_hi_q2ps = []\n kld_hi_p2qs = []\n for i in range(self.ir_steps):\n kld_hi_q2p = self.kldi_q2p[i]\n kld_hi_p2q = self.kldi_p2q[i]\n kld_hi_q2ps.append(T.sum(kld_hi_q2p**p, \\\n axis=1, keepdims=True))\n kld_hi_p2qs.append(T.sum(kld_hi_p2q**p, \\\n axis=1, keepdims=True))\n # compute the batch-wise costs\n kld_hi_q2p = sum(kld_hi_q2ps)\n kld_hi_p2q = sum(kld_hi_p2qs)\n # construct KLd cost for the distributions over z\n kld_z_q2ps = gaussian_kld(self.q_z_mean, self.q_z_logvar, \\\n self.p_z_mean, self.p_z_logvar)\n kld_z_p2qs = gaussian_kld(self.p_z_mean, self.p_z_logvar, \\\n self.q_z_mean, self.q_z_logvar)\n kld_z_q2p = T.sum(kld_z_q2ps**p, axis=1, keepdims=True)\n kld_z_p2q = T.sum(kld_z_p2qs**p, axis=1, keepdims=True)\n return [kld_z_q2p, kld_z_p2q, kld_hi_q2p, kld_hi_p2q]", "def __init__(self, osi, fy, fu, es, esh, eps_sh, eps_ult, cf, alpha_2, cd):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.es = float(es)\n self.esh = float(esh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.cf = float(cf)\n self.alpha_2 = alpha_2\n self.cd = float(cd)\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.es, self.esh, self.eps_sh, self.eps_ult, '-CMFatigue', self.cf, self.alpha_2, self.cd]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def _make_objective(ppci, net):\n\n ng = len(ppci[\"gen\"])\n\n # Determine length of gencost array\n if (net.piecewise_linear_cost.type == \"q\").any() or (net.polynomial_cost.type == \"q\").any():\n len_gencost = 2 * ng\n else:\n len_gencost = 1 * ng\n\n # get indices\n eg_idx = net._pd2ppc_lookups[\"ext_grid\"] if \"ext_grid\" in net._pd2ppc_lookups else None\n gen_idx = net._pd2ppc_lookups[\"gen\"] if \"gen\" in net._pd2ppc_lookups else None\n sgen_idx = net._pd2ppc_lookups[\"sgen_controllable\"] if \"sgen_controllable\" in \\\n net._pd2ppc_lookups else None\n load_idx = net._pd2ppc_lookups[\"load_controllable\"] if \"load_controllable\" in \\\n net._pd2ppc_lookups else None\n dc_gens = net.gen.index[(len(net.gen) - len(net.dcline) * 2):]\n from_gens = net.gen.loc[dc_gens[1::2]]\n if gen_idx is not None:\n dcline_idx = gen_idx[from_gens.index]\n\n # calculate size of gencost array\n if len(net.piecewise_linear_cost):\n n_coefficients = net.piecewise_linear_cost.p.values[0].shape[1] * 2\n else:\n n_coefficients = 0\n if len(net.polynomial_cost):\n n_coefficients = max(n_coefficients, net.polynomial_cost.c.values[0].shape[1], 4)\n\n if n_coefficients:\n # initialize array\n ppci[\"gencost\"] = zeros((len_gencost, 4 + n_coefficients), dtype=float)\n ppci[\"gencost\"][:, MODEL:COST + 4] = array([1, 0, 0, 2, 0, 0, 1, 0])\n\n if len(net.piecewise_linear_cost):\n\n for type in [\"p\", \"q\"]:\n if (net.piecewise_linear_cost.type == type).any():\n costs = net.piecewise_linear_cost[net.piecewise_linear_cost.type == type]\n p = concatenate(costs.p)\n f = concatenate(costs.f)\n\n if type == \"q\":\n shift_idx = ng\n else:\n shift_idx = 0\n\n for el in [\"gen\", \"sgen\", \"ext_grid\", \"load\", \"dcline\"]:\n\n if not 
costs.element[costs.element_type == el].empty:\n if el == \"gen\":\n idx = gen_idx\n if el == \"sgen\":\n idx = sgen_idx\n if el == \"ext_grid\":\n idx = eg_idx\n if el == \"load\":\n idx = load_idx\n if el == \"dcline\":\n idx = dcline_idx\n\n if not costs.element[costs.element_type == el].empty:\n elements = idx[costs.element[costs.element_type ==\n el].values.astype(int)] + shift_idx\n ppci[\"gencost\"][elements, COST::2] = p[\n costs.index[costs.element_type == el]]\n if el in [\"load\", \"dcline\"]:\n ppci[\"gencost\"][elements, COST + 1::2] = - \\\n f[costs.index[costs.element_type == el]] * 1e3\n else:\n ppci[\"gencost\"][elements, COST + 1::2] = f[\n costs.index[costs.element_type == el]] * 1e3\n\n ppci[\"gencost\"][elements, NCOST] = n_coefficients / 2\n ppci[\"gencost\"][elements, MODEL] = 1\n\n if len(net.polynomial_cost):\n\n for type in [\"p\", \"q\"]:\n if (net.polynomial_cost.type == type).any():\n costs = net.polynomial_cost[net.polynomial_cost.type == type]\n c = concatenate(costs.c)\n n_c = c.shape[1]\n c = c * power(1e3, array(range(n_c))[::-1])\n\n if type == \"q\":\n shift_idx = ng\n else:\n shift_idx = 0\n\n for el in [\"gen\", \"sgen\", \"ext_grid\", \"load\", \"dcline\"]:\n\n if not costs.element[costs.element_type == el].empty:\n if el == \"gen\":\n idx = gen_idx\n if el == \"sgen\":\n idx = sgen_idx\n if el == \"ext_grid\":\n idx = eg_idx\n if el == \"load\":\n idx = load_idx\n if el == \"dcline\":\n idx = dcline_idx\n\n elements = idx[costs.element[costs.element_type ==\n el].values.astype(int)] + shift_idx\n if el in [\"load\", \"dcline\"]:\n ppci[\"gencost\"][elements, COST:(COST + n_c):] = - \\\n c[costs.index[costs.element_type == el]]\n else:\n ppci[\"gencost\"][elements, COST:(\n COST + n_c):] = c[costs.index[costs.element_type == el]]\n\n ppci[\"gencost\"][elements, NCOST] = n_c\n ppci[\"gencost\"][elements, MODEL] = 2\n\n else:\n ppci[\"gencost\"] = zeros((len_gencost, 8), dtype=float)\n # initialize as pwl cost - otherwise we will get a user warning from\n # pypower for unspecified costs.\n ppci[\"gencost\"][:, :] = array([1, 0, 0, 2, 0, 0, 1, 1000])\n\n return ppci", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add 
the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def generate():", "def compute_derived_parameters(cls, fdict):\n cgg = fdict['cgd'] + fdict['cgs'] + fdict['cgb']\n return dict(\n cgg=cgg,\n cdd=fdict['cgd'] + fdict['cds'] + fdict['cdb'],\n css=fdict['cgs'] + fdict['cds'] + fdict['csb'],\n cbb=fdict['cgb'] + fdict['cdb'] + fdict['csb'],\n vstar=2.0 * (fdict['ids'] / fdict['gm']),\n gain=fdict['gm'] / fdict['gds'],\n ft=fdict['gm'] / (2.0 * np.pi * cgg),\n )", "def get_labels_comp(F, is_p, is_m):\n labels = [\"C\"+str(idx+1)+\"|P\" if is_p[idx]\n else \"C\"+str(idx+1)+\"|M\" if is_m[idx]\n else \"C\"+str(idx+1) for idx in range(F.shape[0])]\n return labels", "def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf", "def mtf_transformer2_base():\n hparams = common_hparams.basic_params1()\n\n hparams.add_hparam(\"d_model\", 1024)\n hparams.batch_size = 4\n hparams.max_length = 1024\n hparams.label_smoothing = 0.0\n # a small positive value - this seems important for stability when training\n # with bfloat16 activations.\n hparams.add_hparam(\"z_loss\", 1e-4)\n\n # hparams applying to both encoder and decoder layer stacks.\n hparams.add_hparam(\"d_ff\", 2048)\n hparams.add_hparam(\"d_kv\", 128)\n hparams.add_hparam(\"attention_dropout\", 0.0)\n hparams.add_hparam(\"relu_dropout\", 0.0)\n hparams.del_hparam(\"num_heads\")\n hparams.del_hparam(\"num_hidden_layers\")\n hparams.layer_prepostprocess_dropout = 0.0\n hparams.add_hparam(\"extra_logit\", False)\n # number of experts for moe_1d\n hparams.moe_num_experts = 32\n # number of experts for moe_2d = moe_expert_x * moe_expert_y\n hparams.add_hparam(\"moe_expert_x\", 8)\n hparams.add_hparam(\"moe_expert_y\", 4)\n hparams.add_hparam(\"moe_hidden_size\", 32768)\n\n # round up vocab sizes to be a multiple of this value\n hparams.vocab_divisor = 128\n\n hparams.optimizer = \"Adafactor\"\n hparams.learning_rate_schedule = \"rsqrt_decay*linear_decay\"\n hparams.learning_rate_warmup_steps = 10000\n hparams.add_hparam(\"master_dtype\", \"bfloat16\")\n hparams.add_hparam(\"slice_dtype\", \"float32\")\n hparams.activation_dtype = \"bfloat16\"\n\n # 8-way model-parallelism\n hparams.add_hparam(\"mesh_shape\", \"model:8\")\n hparams.add_hparam(\"layout\", 
\"batch:batch;vocab:model;d_ff:model;heads:model\")\n\n # If nonzero, we split the batch across two tensor-dimensions named\n # \"outer_batch\" and \"inner_batch\", allowing for splitting across two mesh\n # dimensions. This is necessary for hierarchical mixture of experts.\n # The two tensor dimensions have sizes hparams.outer_batch_size and\n # hparams.batch_size // hparams.outer_batch_size.\n hparams.add_hparam(\"outer_batch_size\", 0)\n\n hparams.shared_embedding_and_softmax_weights = False\n # length for training or decoding - defaults to max_length\n hparams.add_hparam(\"length\", 0)\n\n # These parameters make Transformer model compatible with mtf\n # Do not override these.\n hparams.no_data_parallelism = True\n hparams.use_fixed_batch_size = True\n hparams.add_hparam(\"mtf_mode\", True)\n hparams.clip_grad_norm = 0. # i.e. no gradient clipping\n hparams.bottom = {\n \"inputs\": modalities.identity_bottom,\n \"targets\": modalities.identity_bottom,\n }\n hparams.top = {\n \"targets\": modalities.identity_top,\n }\n hparams.add_hparam(\"beam_size\", 1)\n\n # If this is True, then in a packed dataset (where exaples are concatenated\n # to form longer examples) we use the global position (within the concatenated\n # sequence) to compute the positional embedding, instead of the position\n # within the individual sequence. This is counterintuitive, but for some\n # reason, it keeps the model from diverging.\n hparams.add_hparam(\"use_global_position_in_packed_sequence\", True)\n\n return hparams", "def _make_serpent_input(self):\n # Create the problem description\n lines = ['% Point source in infinite geometry']\n lines.append('')\n\n # Set the cross section library directory\n if self.xsdir is not None:\n xsdata = (self.other_dir / 'xsdata').resolve()\n lines.append(f'set acelib \"{xsdata}\"')\n lines.append('')\n \n # Create the cell cards: material 1 inside sphere, void outside\n lines.append('% --- Cell cards ---')\n lines.append('cell 1 0 m1 -1')\n lines.append('cell 2 0 outside 1')\n lines.append('')\n\n # Create the surface cards: box centered on origin with 2e9 cm sides`\n # and reflective boundary conditions\n lines.append('% --- Surface cards ---')\n lines.append('surf 1 cube 0.0 0.0 0.0 1.e9')\n\n # Reflective boundary conditions\n lines.append('set bc 2')\n lines.append('')\n\n # Create the material cards\n lines.append('% --- Material cards ---')\n name = zaid(self.nuclide, self.suffix)\n if self.thermal is not None:\n Z, A, m = openmc.data.zam(self.nuclide)\n lines.append(f'mat m1 -{self.density} moder t1 {1000*Z + A}')\n else:\n lines.append(f'mat m1 -{self.density}')\n lines.append(f'{name} 1.0')\n\n # Add thermal scattering library associated with the nuclide\n if self.thermal is not None:\n lines.append(f'therm t1 {self.thermal}')\n lines.append('')\n\n # External source mode with isotropic point source at center of sphere\n lines.append('% --- Set external source mode ---')\n lines.append(f'set nps {self.particles} {self._batches}')\n energy = self.energy * 1e-6\n lines.append(f'src 1 n se {energy} sp 0.0 0.0 0.0')\n lines.append('')\n\n # Detector definition: flux energy spectrum\n lines.append('% --- Detector definition ---')\n lines.append('det 1 de 1 dc 1')\n\n # Energy grid definition: equal lethargy spacing\n min_energy = self._min_energy * 1e-6\n lines.append(f'ene 1 3 {self._bins} {min_energy} {1.0001*energy}')\n lines.append('')\n\n # Treat fission as capture\n lines.append('set nphys 0')\n\n # Turn on unresolved resonance probability treatment\n 
lines.append('set ures 1')\n\n # Write the problem\n with open(self.other_dir / 'input', 'w') as f:\n f.write('\\n'.join(lines))", "def build(cmpd, density=0.5*u.gram/(u.cm**3), n_compounds=1000, \n ff='ff/TraPPE_UA_3_fully_flexible_propane.xml'):\n density.convert_to_units(u.kilogram/u.m**3)\n\n # Pack a box\n box = mb.fill_box(cmpd, n_compounds=n_compounds, density=density.value)\n\n # Wrap coordinates\n new_xyz = box.xyz - 1 * np.floor_divide(box.xyz, box.periodicity) * box.periodicity\n box.xyz = new_xyz\n\n # Apply non-atomistic, custom element naming convention\n for part in box.particles():\n part.name = \"_\" + part.name\n\n # Utilize foyer to parametrize our box\n ff = foyer.Forcefield(forcefield_files=ff)\n box = box.to_parmed(infer_residues=True)\n parametrized_structure = ff.apply(box, combining_rule='lorentz')\n\n # Dump initial coordinates\n parametrized_structure.save('compound.pdb', overwrite=True)\n parametrized_structure.save('compound.mol2', overwrite=True)\n parametrized_structure.save('compound.gro', overwrite=True)\n\n return parametrized_structure", "def gen_compartments(self, model, options):\n c_init_volume = wc_lang.InitVolume(distribution=wc_ontology[options['c']['init_volume']['distribution']],\n mean=options['c']['init_volume']['mean'],\n std=options['c']['init_volume']['std'])\n c_ph = wc_lang.Ph(distribution=wc_ontology[options['c']['ph']['distribution']],\n mean=options['c']['ph']['mean'],\n std=options['c']['ph']['mean'])\n c = model.compartments.create(id='c', name='Cytosol', init_volume=c_init_volume, ph=c_ph)\n c.init_density = model.parameters.create(id='density_c',\n value=options['c']['init_density']['value'],\n units=unit_registry.parse_units(options['c']['init_density']['units']))\n volume_c = model.functions.create(id='volume_c', units=unit_registry.parse_units('l'))\n\n volume_c.expression, error = wc_lang.FunctionExpression.deserialize(\n f'{c.id} / {c.init_density.id}',\n self.get_rate_law_context(model))\n assert error is None, str(error)", "def genCata(codeName,exePath,inputDict,inputDep,cataName,enumName,tsPath):\n paramFile = path.join(path.dirname(inputDict),'damo.par')\n with open(paramFile,'w') as f:\n f.write('CATA'+'\\n')\n f.write(codeName+'\\n')\n f.write(inputDict+'\\n')\n f.write(inputDep+'\\n')\n f.write(cataName+'\\n')\n f.write(enumName+'\\n')\n f.write(tsPath)\n # Removing files if they exist\n if path.exists(cataName):\n remove(cataName)\n if path.exists(enumName):\n remove(enumName)\n if path.exists(tsPath+path.sep+\"labelCataToIhm_en.ts\"):\n remove(tsPath+path.sep+\"labelCataToIhm_en.ts\")\n if path.exists(tsPath+path.sep+\"labelCataToIhm_fr.ts\"):\n remove(tsPath+path.sep+\"labelCataToIhm_fr.ts\")\n\n runDamocles(exePath, paramFile)\n # Running modification on the ts files\n eficas_translation(path.join(tsPath,\"cata_name2eng_name.ts\"),\n path.join(tsPath,\"labelCataToIhm_en.ts\"), \"en\")\n eficas_translation(path.join(tsPath,\"cata_name2fra_name.ts\"),\n path.join(tsPath,\"labelCataToIhm_fr.ts\"), \"fr\")\n remove(path.join(tsPath,\"cata_name2eng_name.ts\"))\n remove(path.join(tsPath,\"cata_name2fra_name.ts\"))\n remove(paramFile)", "def ppl_positive_cone(n):\n gs = ppl.Generator_System(ppl_zero_point(n))\n l = [0]*n\n for i in range(n):\n gs.insert(ppl.ray(ppl.Variable(i)))\n return ppl.C_Polyhedron(gs)", "def _generate_parameters(self, graph, memory_manager):\n # Generate Node Parameters\n parameter_header = \"#ifndef NETWORK_PARAMETERS_H\\n\"\n parameter_header += \"#define NETWORK_PARAMETERS_H\\n\"\n parameter_header 
+= \"#include \\\"pico-cnn/parameters.h\\\"\\n\\n\"\n parameter_code = \"#include \\\"network_parameters.h\\\"\\n\\n\"\n for node in graph.nodes:\n for num, input in enumerate(node.input_tensors):\n buffer = memory_manager.get_buffer(graph, input)\n data = node.input_tensors[input]\n\n # if node.op_type == \"Gemm\":\n # data = data.transpose()\n\n type_code = \"fp_t \" + buffer.name + \"[]\"\n declaration = \"// \" + str(data.shape) + \"\\n\"\n declaration += \"extern \" + type_code + \";\"\n definition = type_code + \" = {\" + \",\".join((str(x) for x in data.flatten())) + \"};\"\n\n parameter_code += definition + \"\\n\\n\"\n parameter_header += declaration + \"\\n\\n\"\n\n parameter_header += \"#endif \\n\"\n\n self.parameter_header = parameter_header\n self.parameter_code = parameter_code", "def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])", "def sparameters(**kwargs):\n d = sweep(**kwargs)\n d.pop(\"GC_sweeps.lsf\", \"\")\n d[\"main.lsf\"] = \"\\n\".join([\"GC_init;\", \"GC_S_extraction;\"])\n d[\"GC_S_extraction.lsf\"] = open(\n CONFIG[\"grating_coupler\"] / \"GC_S_extraction.lsf\"\n ).read()\n d[\"GC_setup_fibre.lsf\"] = open(\n CONFIG[\"grating_coupler\"] / \"GC_setup_fibre.lsf\"\n ).read()\n d[\n \"main.py\"\n ] = \"\"\"\n\nimport pathlib\nimport json\nimport lumapi\n\n\ndirpath = pathlib.Path(__file__).parent.absolute()\n\ns = lumapi.FDTD()\ns.cd(str(dirpath))\ns.eval(\"main;\")\n\nd = {k: list(abs(s.getv(k).flatten())) for k in [\"S11\", \"S12\", \"S21\", \"S22\", \"f\"]}\n\nwith open(dirpath / \"GC_sparameters.json\", \"w\") as f:\n f.write(json.dumps(d))\n\n \"\"\"\n return d", "def calc_ent_f1_and_ent_mcc(hyps: List[str], refs: List[str], vocab, c_fun, report_on_canonicals: bool = False, tok_fun=pkt_tokenize):\n\n # requires internal knowledge of the entire universe\n # specifically of names of kb_fields, kbtrv_fields\n\n # FIXME create_KB_on_the_fly and pkt_tokenize \n # are referred to separately in \n # * helpers.py\n # * metrics.py\n # fix this by \n # adding them to cfg\n # or \n # giving it as attribute to the model \n\n\n # define helper functions\n ## eval metrics\n def precision(predictions: List[int], gold_labels: List[int]):\n # TP/(TP+FP) => iterate over positives (predicted)\n positives = len(predictions)\n if positives:\n tp, fp = zip(*[(1,0) if pred in gold_labels else (0,1) for pred in predictions])\n return sum(tp)/positives\n else:\n return 1.\n\n def recall(predictions: List[int], gold_labels: List[int]):\n # TP/(TP+FN) => iterate over ground truths (gold labels)\n truths = len(gold_labels)\n if truths:\n tp, fn = zip(*[(1,0) if gold in predictions else (0,1) for gold in gold_labels])\n return sum(tp)/truths\n else:\n return 1.\n\n f1_ = lambda p, r: 2 * (p*r)/(p+r) if p+r != 0. else 0.\n mcc_ = lambda p, r: sqrt((r+(1/r)-1)*(p+(1/p)-1)) if r != 0. and p != 0. 
else 0.\n\n # compare ent f1 in trv => lookup vocab indices\n\n f1s = [] # accumulate scores\n mccs = [] # accumulate matthew's correlation coefficients\n num_ents = []\n for i, (hyp,ref) in enumerate(zip(hyps, refs)):\n\n hyp_ents_ref_ents = [] # will hold entity vocabulary indices in the order hyp,ref\n debug = []\n\n for seq in (hyp,ref):\n seq_tokzd = tok_fun(seq)\n canons, indices, matches = c_fun(seq_tokzd) # turn to canonical tokens and return indices that raw tokens were mapped to\n del matches # TODO use matches instead of implementation below FIXME\n\n entities = [\\\n \" \".join([raw for map_idx, raw in zip(indices,seq_tokzd) if map_idx==i])\n for i in range(len((canons)))\n ]\n\n # Filter out tokens that werent changed (noncanonical)\n try:\n canonical_entities, surface_entities = list(zip(*[(c,t) for c,t in zip(canons,entities) if c!=t]))\n except ValueError: # no entities\n canonical_entities, surface_entities = [], []\n \n if report_on_canonicals: \n entities = canonical_entities\n else:\n entities = surface_entities\n \n # record number of decisions made for this hypothesis\n num_ents += [len(entities)]\n\n # Filter out unk IDs\n entities = [tok for tok in entities if not vocab.is_unk(tok)]\n # turn to vocab indices (int)\n seq_enty_voc_indices = [vocab.stoi[entity] for entity in entities]\n\n hyp_ents_ref_ents.append(seq_enty_voc_indices)\n debug.append(entities)\n\n pred, truth = hyp_ents_ref_ents\n\n P = precision(pred,truth)\n R = recall(pred,truth)\n \n f1s.append(f1_(P,R))\n mccs.append(mcc_(P,R))\n\n print(\" ### Validation Classification Metrics Debug ###\\n\"+\\\n f\"hyp: {hyp}\\nref: {ref}\\nentities: {hyp_ents_ref_ents}\\nPrecision: {P}\\nRecall: {R}\"+\\\n \" ### Debug Metrics End ### \")\n \n assert len(hyps) == len(refs) == len(f1s) == len(mccs), (len(hyps), len(refs), len(f1s), len(mccs))\n f1_avg = sum([num_ents[i]*f1s[i] for i in range(len(f1s))]) / sum(num_ents) if len(num_ents) else 0\n mcc_avg = sum([num_ents[i]*mccs[i] for i in range(len(mccs))]) / sum(num_ents) if len(num_ents) else 0\n return f1_avg, mcc_avg", "def f2c_cml_function():\n import sys\n\n F = float(sys.argv[1])\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def generate_compartments(parameterdict):\n\n refcmpts, model = [parameterdict[i] for i in ['refcmpts', 'model']]\n\n peripherals = [] # List for peripheral compartments\n # Iterates through compartments. Adds peripherals to peripheral list,\n # creates main and optionally sub compartment (if in SC model).\n # Doesn't allow multiple main/sub compartments.\n for cmpt in refcmpts:\n if cmpt[2] == 'Peripheral':\n peripherals.append(Compartment(cmpt[0], cmpt[1]))\n\n elif cmpt[2] == 'Main':\n if 'maincmpt' in locals():\n raise ValueError(\"Can't have two main compartments.\")\n else:\n maincmpt = Compartment(cmpt[0], cmpt[1])\n\n elif cmpt[2] == 'Sub' and model == 'sc':\n if 'subcmpt' in locals():\n raise ValueError(\"Can't have two subcompartments.\")\n else:\n subcmpt = Compartment(cmpt[0], cmpt[1])\n if subcmpt not in locals():\n subcmpt = None\n\n return maincmpt, peripherals, subcmpt", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. 
Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def set_norm(self, P={}, C={}, c_t=\"pp\"):\n N = dict()\n if isinstance(P, list):\n Pl = P\n else:\n Pl = [P]\n if isinstance(C, list):\n Cl = C\n else:\n Cl = [C]\n if len(Pl) > 0:\n N['comp_dim'] = len(Pl)\n if len(Cl) > 0:\n if len(Cl) != len(Pl):\n raise ValueError(\"Need same number of principal parts and coefficients to set!\")\n keys = list(Cl[0].keys())\n for j in range(1, N['comp_dim']):\n if list(Cl[j].keys()) != keys:\n raise ValueError(\"Need to set the same coefficients! 
(or call the method more than once)\")\n else:\n Cl = []\n for j in range(N['comp_dim']):\n Cl.append(C)\n N['Vals'] = list()\n N['Vals'] = list()\n N['SetCs'] = list()\n for i in range(N['comp_dim']):\n N['Vals'].append({})\n N['Vals'].append({})\n N['SetCs'].append([])\n D = self.multiplier().D()\n for j in range(len(D)):\n a = D[j]\n x = self.multiplier().Qv[j]\n # N['Vals'][i][(0,j)]=dict()\n if x == 0:\n if c_t == \"pp\":\n # N['comp_dim']=N['comp_dim']+1\n N['SetCs'][i].append((j, 0))\n if (0, j) in Pl[i]:\n N['Vals'][i][(j, 0)] = Pl[i][(j, 0)]\n else:\n N['Vals'][i][(j, 0)] = 0 # P[(0,0)]\n elif x < 0 and self._holomorphic:\n N['SetCs'][i].append((j, 0))\n N['Vals'][i][(j, 0)] = 0 # P[(0,0)]\n\n for (r, n) in Cl[i].keys():\n if(N['SetCs'][i].count((r, n)) == 0):\n N['SetCs'][i].append((r, n))\n N['Vals'][i][(r, n)] = Cl[i][(r, n)]\n return N", "def _generate(self, feature_map_shape_list, **params):\n pass", "def beta_gen_mnt(p):\n return np.array([-1.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))", "def p2f (p):\n #return 11000**((p+1)/2)\n #return (p+1)*11000\n return (p+1)*5500", "def MakeGCMCProgram(MaterialInfoList, TemperatureList, PressureList, OutputPath, MakeTorque, GasType,\r\n GasAtomTypeNum, GasAtomType, GasPartialPressure, CutOff, MaterialAtomDictionary,\r\n GasAtomDictionary, SpecialPairList, UseEmap, UsePmap, UsePost, EquilibriumStep, ProductionStep,\r\n TorqueSetting, MuSiCSetting, Nodes, TaskSuffix):\r\n\r\n def MakeAtomAtomFile(GCMCOutputPath,MaterialInfo,GasAtomType,SpecialPairList,GasAtomDictionary,MaterialAtomDictionary):\r\n\r\n with open('%s/atom_atom_file' % (GCMCOutputPath), 'w') as AtomAtomFile:\r\n\r\n AtomAtomFile.write('-'.center(80, '-'))\r\n AtomAtomFile.write('\\n')\r\n\r\n for i in range(len(MaterialInfo[5])):\r\n for j in range(len(MaterialInfo[5])):\r\n if i <= j:\r\n AtomAtomFile.write('%-10s%-10sOFF\\n' % (MaterialInfo[5][i], MaterialInfo[5][j]))\r\n\r\n for k in range(len(GasAtomType)):\r\n for l in range(len(GasAtomType)):\r\n if k <= l:\r\n Key=False\r\n for SpecialPair in SpecialPairList:\r\n if GasAtomType[k] in SpecialPair[0] and GasAtomType[l] in SpecialPair[0] and GasAtomType[k]!=GasAtomType[l]:\r\n Key=True\r\n if Key==False:\r\n num1 = GasAtomDictionary.get(GasAtomType[k])\r\n num2 = GasAtomDictionary.get(GasAtomType[l])\r\n sig1 = str('%.3f' % ((float(num1[0]) + float(num2[0])) / 2))\r\n eps1 = str('%.3f' % ((float(num1[1]) * float(num2[1])) ** 0.5))\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email protected]\\n'%(GasAtomType[k],GasAtomType[l],'LJ',sig1,eps1,CutOff,GasAtomType[k],GasAtomType[l],'WFCOUL',CutOff))\r\n\r\n for h in range(len(GasAtomType)):\r\n for g in range(len(MaterialInfo[5])):\r\n Key = False\r\n for SpecialPair in SpecialPairList:\r\n if GasAtomType[h] in SpecialPair[0] and MaterialInfo[5][g] in SpecialPair[0]:\r\n Key = True\r\n if Key==False:\r\n num3 = GasAtomDictionary.get(GasAtomType[h])\r\n num4 = MaterialAtomDictionary.get(MaterialInfo[5][g])\r\n sig2 = str('%.3f' % ((float(num3[0]) + float(num4[0])) / 2))\r\n eps2 = str('%.3f' % ((float(num3[1]) * float(num4[1])) ** 0.5))\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email protected]\\n'%(GasAtomType[h],MaterialInfo[5][g],'LJ',sig2,eps2,CutOff,GasAtomType[h],MaterialInfo[5][g],'WFCOUL',CutOff))\r\n\r\n for m in SpecialPairList:\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email 
protected]\\n'%(m[0][0],m[0][1],'LJ',m[1][0],m[1][1],CutOff,m[0][0],m[0][1],'WFCOUL',CutOff))\r\n\r\n AtomAtomFile.write('-'.center(80, '-'))\r\n\r\n def MakeIntramolecularFile(GCMCOutputPath,MaterialInfo,GasType):\r\n\r\n with open('%s/intramolecular_file' % (GCMCOutputPath), 'w') as IntraFile:\r\n IntraFile.write('Intra: %s'%(MaterialInfo[7]))\r\n for i in GasType:\r\n IntraFile.write('\\nIntra: %s'%(i))\r\n\r\n def MakeMoleMoleFile(GCMCOutputPath,MaterialInfo,GasType,UsePmap,UseEmap,GasAtomTypeNum,GasAtomType,GasAtomDictionary):\r\n\r\n with open('%s/mole_mole_file' % (GCMCOutputPath), 'w') as MoleMole:\r\n MoleMole.write('%s %s NCOUL OFF\\n%s %s COUL OFF' % (MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],MaterialInfo[7]))\r\n\r\n for i in range(len(GasType)):\r\n for j in range(len(GasType)):\r\n if i <= j:\r\n MoleMole.write('\\n\\n%s %s NCOUL BASIC LJ FAST\\n%s %s COUL BASIC FAST WFCOUL'%(GasType[i],GasType[j],GasType[i],GasType[j]))\r\n\r\n MoleMole.write('\\n')\r\n\r\n if UsePmap==True and MaterialInfo[6]==True:\r\n Key=0\r\n for k in range(len(GasType)):\r\n MoleMole.write('\\n%s %s NCOUL MAP@%s FAST '%(GasType[k],MaterialInfo[7],MaterialInfo[7]))\r\n for l in GasAtomType[Key:Key+GasAtomTypeNum[i]]:\r\n pseudo = l.split('_')\r\n if pseudo[0]!='M' and GasAtomDictionary.get(l)[0]!='0':\r\n MoleMole.write('%s@PMAP@%s_in_%s.pmap '%(l,l,MaterialInfo[7]))\r\n Key=Key+GasAtomTypeNum[i]\r\n else:\r\n for k in GasType:\r\n MoleMole.write('\\n%s %s NCOUL BASIC LJ FAST'%(k,MaterialInfo[7]))\r\n\r\n MoleMole.write('\\n')\r\n\r\n if UseEmap==True and MaterialInfo[6]==True:\r\n for m in GasType:\r\n MoleMole.write('\\n%s %s COUL MAP@%s FAST all@EMAP@%s_all.emap'%(m,MaterialInfo[7],MaterialInfo[7],MaterialInfo[7]))\r\n else:\r\n for m in GasType:\r\n MoleMole.write('\\n%s %s COUL BASIC FAST WFCOUL'%(m,MaterialInfo[7]))\r\n\r\n def MakeEquilibriumGCMC(GCMCOutputPath,GasType,MaterialInfo,EquilibriumStep,GasAtomType,Temperature,Pressure,PartialList):\r\n\r\n with open('%s/equilibrium_gcmc.ctr'%(GCMCOutputPath),'w') as EquilibriumGCMC:\r\n EquilibriumGCMC.write('''------ General Information ------\r\n%s molecule in %s\r\n%s # No. of iterations\r\n10000 # No. of steps between writes to output/log file\r\n10000 # No. of steps between writes to crash file\r\n100000 # No. of steps between writes to config. file\r\n1 # Start numbering simulations from .\r\n30728 # Iseed\r\n3 # specifies contents of config file, (3) only nmoles, nrg ,pair nrg and coords written\r\n%s_in_%s_res # Restart File to write to\r\n%s_in_%s_con # Configuration File\r\n\r\n------ Atomic Types ------\r\n%s # number of atomic types '''%(' '.join(GasType),MaterialInfo[7],EquilibriumStep,\r\n '_'.join(GasType),MaterialInfo[7],'_'.join(GasType),\r\n MaterialInfo[7],len(MaterialInfo[5])+len(GasAtomType)))\r\n\r\n for i in GasAtomType:\r\n EquilibriumGCMC.write('\\n\\n%s\\n%s.atm'%(i,i))\r\n\r\n for j in MaterialInfo[5]:\r\n EquilibriumGCMC.write('\\n\\n%s\\n%s.atm'%(j,j))\r\n\r\n EquilibriumGCMC.write('''\\n------ Molecule Types ------\r\n%s # number of molecular types'''%(len(GasType)+1))\r\n\r\n for k in GasType:\r\n EquilibriumGCMC.write('\\n\\n%s\\n%s.mol'%(k,k))\r\n\r\n EquilibriumGCMC.write('''\\n\\n%s # sorbate\r\n%s.mol # sorbate coordiCates file\r\n------ Simulation Cell Information ------\r\n%s # Fundamental cell file\r\n%s # No. 
of unit cells in x, y, z direction\r\n1, 1, 1 # (1 = Periodic) in x, y, z\r\n------ Forcefield Information ------\r\nBASIC\r\nMOL\r\natom_atom_file # atom-atom interaction file\r\nmole_mole_file # sorbate-sorbate interaction file\r\nintramolecular_file # intramolecular interaction file/specification\r\n------ Ideal Parameters ------\r\nIdeal # Equation of State\r\n%s # no. of sorbates'''%(MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],', '.join(MaterialInfo[4]),len(GasType)))\r\n\r\n for l in GasType:\r\n EquilibriumGCMC.write('\\n%s' % (l))\r\n\r\n EquilibriumGCMC.write('''\\n------ GCMC Information -------\r\n1 # No. of iterations\r\n%s # temperature\r\nIdeal Parameters # Tag for the equation of state (NULL = Ideal Gas)\r\n1 # No. of simulation points\r\n5000 # Block size for statistics\r\n%s # no. of sorbates'''%(Temperature, len(GasType)))\r\n\r\n for m in range(len(GasType)):\r\n EquilibriumGCMC.write('''\\n-------\r\n%s # Sorbate Came\r\n%s # pressure(kpa)\r\nNull # sitemap fileCame (Null = no sitemap)\r\n4 # no of gcmc movetypes\r\n2.0, 2.0, 1.0, 1.0 # move type weights\r\nRINSERT # type of move\r\nRDELETE # type of move\r\nRTRANSLATE # type of move\r\n0.2, 1 # Delta Translate, adjust delta option (0=NO, 1=YES)\r\nRROTATE\r\n0.2, 1 # Delta Translate, adjust delta option (0=NO, 1=YES)'''%(GasType[m],PartialList[m]*Pressure))\r\n\r\n EquilibriumGCMC.write('\\n------ Configuration Initialization ------')\r\n\r\n for n in GasType:\r\n EquilibriumGCMC.write('\\n%s # Sorbate_Type\\nGCMC NULL'%(n))\r\n\r\n EquilibriumGCMC.write('''\\n%s # Sorbent_Type\r\nFIXED NULL\r\n------ Main Datafile Information ------\r\nEnergy, position, pair_energy # contents of datafile'''%(MaterialInfo[7]))\r\n\r\n def MakeProductionGCMC(GCMCOutputPath,GasType,MaterialInfo,ProductionStep,GasAtomType,Temperature,Pressure,PartialList):\r\n\r\n with open('%s/production_gcmc.ctr' % (GCMCOutputPath), 'w') as ProductionGCMC:\r\n ProductionGCMC.write('''------ General Information ------\r\n%s molecule in %s\r\n%s # No. of iterations\r\n5000 # No. of steps between writes to output/log file\r\n5000 # No. of steps between writes to crash file\r\n20000 # No. of steps between writes to config. file\r\n2 # Start numbering simulations from .\r\n30728 # Iseed\r\n3 # specifies contents of config file, (3) only nmoles, nrg ,pair nrg and coords written\r\n%s_in_%s_res # Restart File to write to\r\n%s_in_%s_con # Configuration File\r\n\r\n------ Atomic Types ------\r\n%s # number of atomic types '''%(' '.join(GasType),MaterialInfo[7],ProductionStep,\r\n '_'.join(GasType),MaterialInfo[7],'_'.join(GasType),\r\n MaterialInfo[7],len(MaterialInfo[5])+len(GasAtomType)))\r\n\r\n for i in GasAtomType:\r\n ProductionGCMC.write('\\n\\n%s\\n%s.atm'%(i,i))\r\n\r\n for j in MaterialInfo[5]:\r\n ProductionGCMC.write('\\n\\n%s\\n%s.atm'%(j,j))\r\n\r\n ProductionGCMC.write('''\\n------ Molecule Types ------\r\n%s # number of molecular types'''%(len(GasType)+1))\r\n\r\n for k in GasType:\r\n ProductionGCMC.write('\\n\\n%s\\n%s.mol'%(k,k))\r\n\r\n ProductionGCMC.write('''\\n\\n%s # sorbate\r\n%s.mol # sorbate coordiCates file\r\n------ Simulation Cell Information ------\r\n%s # Fundamental cell file\r\n%s # No. 
of unit cells in x, y, z direction\r\n1, 1, 1 # (1 = Periodic) in x, y, z\r\n------ Forcefield Information ------\r\nBASIC\r\nMOL\r\natom_atom_file # atom-atom interaction file\r\nmole_mole_file # sorbate-sorbate interaction file\r\nintramolecular_file # intramolecular interaction file/specification\r\n------ Ideal Parameters ------\r\nIdeal # Equation of State\r\n%s # no. of sorbates'''%(MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],', '.join(MaterialInfo[4]),len(GasType)))\r\n\r\n for l in GasType:\r\n ProductionGCMC.write('\\n%s' % (l))\r\n\r\n ProductionGCMC.write('''\\n------ GCMC Information -------\r\n1 # No. of iterations\r\n%s # temperature\r\nIdeal Parameters # Tag for the equation of state (NULL = Ideal Gas)\r\n1 # No. of simulation points\r\n1000 # Block size for statistics\r\n%s # no. of sorbates'''%(Temperature, len(GasType)))\r\n\r\n for m in range(len(GasType)):\r\n ProductionGCMC.write('''\\n-------\r\n%s # Sorbate Came\r\n%s # pressure(kpa)\r\nNull # sitemap fileCame (Null = no sitemap)\r\n4 # no of gcmc movetypes\r\n2.0, 2.0, 1.0, 1.0 # move type weights\r\nRINSERT # type of move\r\nRDELETE # type of move\r\nRTRANSLATE # type of move\r\n0.2, 1 # Delta Translate, adjust delta option (0=NO, 1=YES)\r\nRROTATE\r\n0.2, 1 # Delta Translate, adjust delta option (0=NO, 1=YES)'''%(GasType[m],PartialList[m]*Pressure))\r\n\r\n ProductionGCMC.write('\\n------ Configuration Initialization ------')\r\n\r\n for n in GasType:\r\n ProductionGCMC.write('\\n%s # Sorbate_Type\\nRESTARTFILE %s_in_%s_res.1'%(n,'_'.join(GasType),MaterialInfo[7]))\r\n\r\n ProductionGCMC.write('''\\n%s # Sorbent_Type\r\nRESTARTFILE %s_in_%s_res.1\r\n------ Main Datafile Information ------\r\nEnergy, position, pair_energy # contents of datafile'''%(MaterialInfo[7],'_'.join(GasType),MaterialInfo[7]))\r\n\r\n def MakePostFile(GCMCOutputPath,GasType,MaterialInfo):\r\n\r\n with open('%s/post.ctr'%(GCMCOutputPath),'w') as Post:\r\n Post.write('''------ Post Processor Information ------\r\nGCMC # Type of simlation GCMC, MD\r\n%s_in_%s_con # basename for config files what your .con files are called in your gcmc folder\r\n2 # The first and last numbers of the .con files that were created in the gcmc folder\r\nregenerated_post # name for new ctrlfile that will be regenerated\r\nresult_post # Base name for output files\r\n20, 0 # Percentages of data to skipped at start and end\r\n\r\n------ Post : Energy Average Info -------\r\n20\r\n------ Post : Loading Average Info --------\r\n20\r\n'''%('_'.join(GasType),MaterialInfo[7]))\r\n\r\n def MakeTorqueFile(GCMCOutputPath,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting,UsePost,OutputPath):\r\n\r\n Node=random.choice(Nodes)\r\n\r\n with open('%s/run_gcmc.pbs' % (GCMCOutputPath), 'w') as Torque:\r\n Torque.write('''#!/bin/bash\r\n#PBS -l nodes=%s\r\n#PBS -N MuSiC_gcmc.%s\r\n#PBS -o music_gcmc_jobs.out\r\n#PBS -j oe\r\n\r\n#\r\n# The number of processors you desire is indicated by replacing\r\n# <nproc> above.\r\n#\r\n\r\n#\r\n# GROMACS path and arguments to mdrun :\r\n#\r\ncd $PBS_O_WORKDIR\r\n\r\n# =============== Environment Setting ============================ #\\n'''%(Node,TaskSuffix))\r\n\r\n for i in TorqueSetting:\r\n Torque.write('%s'%(i))\r\n\r\n Torque.write('''# =============== Don't Change Above Setting ===================== #\r\n\r\necho \"============The computed nodes============\"\r\ncp -f $PBS_NODEFILE NODE.txt\r\necho \"User: \" $USER\r\ncat $PBS_NODEFILE\r\necho \"Job ID: \" $PBS_JOBID\r\necho \"Job Cookie: \" $PBS_JOBCOOKIE\r\necho \"Using executable: \" `which 
mpirun`\r\necho `date`\r\necho \"============Finished setting==============\"\r\n\r\n# =========== Setting Jobs ============================ #\\n''')\r\n\r\n for j in MuSiCSetting:\r\n Torque.write('%s'%(j))\r\n\r\n Torque.write('''export ATOMSDIR=%s\r\nexport MOLSDIR=%s\r\nexport PMAPDIR=%s\r\nexport EMAPDIR=%s\r\nexport SMAPDIR=%s'''%(os.path.join(OutputPath,'Atoms'),os.path.join(OutputPath,'Mols'),\r\n os.path.join(OutputPath,'Maps'),os.path.join(OutputPath,'Maps'),\r\n os.path.join(OutputPath,'Maps')))\r\n\r\n Torque.write('''\r\n# =========== Setting Jobs ============================ #\r\n\r\n# +++++++++++++++ Start Computing +++++++++++++++++++++ #\r\n\r\nTIME_DIR=$(date '+%Y-%m-%d_%H-%M-%S')\r\nTIME_DIR=\"${USER}_jobs_${TIME_DIR}_${PBS_JOBID}\"\r\nif [ -d /utmp ]; then\r\n TEMP_DIR=/utmp/${USER}/${TIME_DIR}\r\nelse\r\n TEMP_DIR=/temp/${USER}/${TIME_DIR}\r\nfi\r\nmkdir -p ${TEMP_DIR}\r\ncp -rf * ${TEMP_DIR}\r\ncd ${TEMP_DIR}\r\nrm -f music_gcmc_jobs.out\r\necho \"The temp direcotry: \" ${TEMP_DIR}\r\necho \"============Finished setting==============\"\r\n\r\necho \"+++++++++++++ Run MuSic ++++++++++++++++++++++++++++\"\r\nmusic_gcmc equilibrium_gcmc.ctr > equilibrium_gcmc.txt\r\necho `date`\r\nmusic_gcmc production_gcmc.ctr > production_gcmc.txt\r\necho `date`''')\r\n\r\n if UsePost == True:\r\n Torque.write('\\nmusic_post post.ctr > post.txt\\necho `date`')\r\n\r\n Torque.write('''\\necho \"+++++++++++++ Finish MuSic +++++++++++++++++++++++++\"\r\n\r\ncd $PBS_O_WORKDIR\r\ncp -rf ${TEMP_DIR}/* .\r\nrm -rf ${TEMP_DIR}\r\n\r\n\r\necho \"All files were copied back!\"\r\necho \"The work direcotry: \" $PBS_O_WORKDIR\r\necho `date`\r\necho \"============Finished Job ==============\"''')\r\n\r\n def main():\r\n\r\n for MaterialInfo in MaterialInfoList:\r\n for Partial in GasPartialPressure:\r\n for Temperature in TemperatureList:\r\n for Pressure in PressureList:\r\n GCMCOutputPath='%s/%s/%s/%s/%s/%sK/%skPa'%(OutputPath,'GCMC','_'.join(GasType),MaterialInfo[7],Partial,Temperature,Pressure)\r\n\r\n if os.path.exists(GCMCOutputPath):\r\n pass\r\n else:\r\n os.makedirs(GCMCOutputPath)\r\n PartialList = [float(x) for x in Partial.split('_')]\r\n MakeAtomAtomFile(GCMCOutputPath,MaterialInfo,GasAtomType,SpecialPairList,GasAtomDictionary,MaterialAtomDictionary)\r\n MakeIntramolecularFile(GCMCOutputPath,MaterialInfo,GasType)\r\n MakeMoleMoleFile(GCMCOutputPath,MaterialInfo,GasType,UsePmap,UseEmap,GasAtomTypeNum,GasAtomType,GasAtomDictionary)\r\n MakeEquilibriumGCMC(GCMCOutputPath,GasType,MaterialInfo,EquilibriumStep,GasAtomType,Temperature,Pressure,PartialList)\r\n MakeProductionGCMC(GCMCOutputPath,GasType,MaterialInfo,ProductionStep,GasAtomType,Temperature,Pressure,PartialList)\r\n\r\n if MakeTorque==True:\r\n MakeTorqueFile(GCMCOutputPath,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting,UsePost,OutputPath)\r\n if UsePost==True:\r\n MakePostFile(GCMCOutputPath,GasType,MaterialInfo)\r\n\r\n if __name__ == '__main__':\r\n main()", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n 
assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def ff(process, q2, par, n=4, t0=None):\n flavio.citations.register(\"Leljak:2021vte\")\n pd = process_dict[process]\n mpl = par[process + ' BCL LMVD m+']\n m0 = par[process + ' BCL LMVD m0']\n mB = par[process + ' BCL LMVD m_' + pd['B']]\n mP = par[process + ' BCL LMVD m_' + pd['P']]\n ff = {}\n b={}\n for i in ['f+', 'fT']:\n b[i] = [par[process + ' BCL' + ' f' + f'_{i[1:]}(0)']]+ [ par[process + ' BCL' + ' b' f'_{i[1:]}^{j}'] for j in range(1, n)]\n # evaluate FFs\n ff['f+'] = pole('f+', mpl, q2) * param_fplusT(mB, mP, b['f+'], q2, t0)\n ff['fT'] = pole('fT', mpl, q2) * param_fplusT(mB, mP, b['fT'], q2, t0)\n\n # f0 is modified\n b['f0'] = [par[process + ' BCL' + ' f' + f'_+(0)']]+ [ par[process + ' BCL' + ' b' + f'_0^{j}'] for j in range(1, n+1)] # note the +1\n ff['f0'] = pole('f0', m0, q2) * param_f0(mB, mP, b['f0'], q2, t0)\n return ff", "def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if 
self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def beta_gen_lasso(p):\n cardi = 0.005\n return np.array([0]*int(p-int(cardi*p)) + [1]*int(cardi*p))", "def __init__(self, pwinput, what=None, values=None, prefixInp='TEMP_PWINPUT_', prefixOut='LOG_'):\n #\n if what == None and values==None:\n raise RuntimeWarning('Test convergence is set to ecutwfc')\n self.what = 'ecutwfc'\n self.values = np.arange(20,80,10)\n #\n self.pwinput = pwinput\n self.what = what\n self.values = values\n self.Ndata = len(values)\n #\n self.energies = None\n #\n self.prefixInp = prefixInp # XXX need to save this ?\n self.prefixOut = prefixOut\n self.inpFiles = []\n self.outFiles = []\n if self.what == 'kpoints':\n for v in values:\n strv = str(v).strip('[]').replace(',','').replace(' ','_')\n self.inpFiles.append(self.prefixInp + what + '_' + strv)\n self.outFiles.append(self.prefixOut + what + '_' + strv)\n else:\n for v in values:\n self.inpFiles.append(self.prefixInp + what + '_' + str(v))\n self.outFiles.append(self.prefixOut + what + '_' + str(v))\n #\n self.inputs_have_been_written = False", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def P2G_func(self, dt, P):\n p_C = ti.static(self.p_C)\n 
p_v = ti.static(self.p_v)\n p_x = ti.static(self.p_x)\n g_m = ti.static(self.g_m)\n g_v = ti.static(self.g_v)\n p_F = ti.static(self.p_F)\n p_Jp = ti.static(self.p_Jp)\n\n base = ti.floor(g_m.getG(p_x[P] - 0.5 * g_m.dx)).cast(Int)\n fx = g_m.getG(p_x[P]) - base.cast(Float)\n\n # Here we adopt quadratic kernels\n w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2]\n # dw = [fx - 1.5, -2.0 * (fx - 1), fx - 0.5]\n\n # # TODO affine would do this in P2G.. why\n # p_F[P] = (ti.Matrix.identity(Int, self.dim) + dt * p_C[P]) @ p_F[P]\n\n force = ti.Matrix.zero(Float, self.dim, self.dim)\n # want to decrease branching\n if self.p_material_id[P] == MaType.elastic:\n force = self.elasticP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.liquid:\n force = self.liquidP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.snow:\n force = self.snowP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.sand:\n force = self.sandP2Gpp(P, dt)\n\n affine = force + self.cfg.p_mass * p_C[P]\n for offset in ti.static(ti.grouped(self.stencil_range3())):\n # print(\"P2G: \", offset)\n dpos = g_m.getW(offset.cast(Float) - fx)\n\n weight = 1.0\n for d in ti.static(range(self.dim)):\n weight *= w[offset[d]][d]\n\n # dweight = ts.vecND(self.dim, self.cfg.inv_dx)\n # for d1 in ti.static(range(self.dim)):\n # for d2 in ti.static(range(self.dim)):\n # if d1 == d2:\n # dweight[d1] *= dw[offset[d2]][d2]\n # else:\n # dweight[d1] *= w[offset[d2]][d2]\n\n # force = - self.cfg.p_vol * kirchoff @ dweight\n # TODO ? AFFINE\n # g_v[base + offset] += self.cfg.p_mass * weight * (p_v[P] + p_C[P] @ dpos) # momentum transfer\n # TODO Got lots of simultaneous atomic here\n g_v[base + offset] += weight * (self.cfg.p_mass * self.p_v[P] + affine @ dpos)\n g_m[base + offset] += weight * self.cfg.p_mass\n\n # g_v[base + offset] += dt * force", "def load_charmm_ff_params(fname):\n with open(fname) as f:\n lines = f.readlines()\n\n comment_stripper = re.compile(r'[!\\*].*')\n ffp = ForceFieldParams(fname)\n\n current_section = None\n for i in range(len(lines)):\n # Ignore comments and blank lines\n line = comment_stripper.sub('', lines[i].strip())\n if line == '': continue\n\n tokens = line.split()\n skip_line = False\n for section in ('ATOM', 'BOND', 'ANGL', 'DIHE', 'IMPR', 'NONB', 'CMAP'):\n if tokens[0].startswith(section):\n current_section = section\n skip_line = True\n break\n\n if skip_line: continue\n\n if current_section is 'BOND':\n key1, key2 = key_names((tokens[0], tokens[1]))\n ffp.bonds[key1] = ffp.bonds[key2] = {\n 'force_constant': float(tokens[2]),\n 'equilibrium_distance': float(tokens[3])\n }\n elif current_section is 'ANGL':\n # TODO: Urey-Bradley terms\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2]))\n ffp.angles[key1] = ffp.angles[key2] = {\n 'force_constant': float(tokens[3]),\n 'equilibrium_angle': float(tokens[4]) * pi / 180.0\n }\n elif current_section is 'DIHE':\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n ffp.dihedrals[key1] = ffp.dihedrals[key2] = {\n 'force_constant': float(tokens[4]),\n 'multiplicity': float(tokens[5]),\n 'delta': float(tokens[6])\n }\n elif current_section is 'IMPR':\n key = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n else:\n # Unknown line type\n continue\n return ffp", "def createInstanceSource(pcol, path, nr_robots, smallest_robot_id):\n\n # prevent alphabet related bugs by including e and f objects in alphabet\n if (\"e\" not in pcol.A):\n pcol.A.append(\"e\")\n if (\"f\" not in pcol.A):\n pcol.A.append(\"f\")\n\n with 
open(path + \".c\", \"w\") as fout:\n fout.write(\"\"\"#include \"%s.h\"\n\n#ifdef NEEDING_WILDCARD_EXPANSION\n #include \"wild_expand.h\"\n#endif\n\n#ifdef PCOL_SIM\"\"\" % path.split(\"/\")[-1]) #only filename\n\n fout.write(\"\"\"\\n char* objectNames[] = {[NO_OBJECT] = \"no_object\", \"\"\")\n for obj in pcol.A:\n fout.write(\"\"\"[OBJECT_ID_%s] = \"%s\", \"\"\" % (obj.upper(), obj))\n\n fout.write(\"\"\"};\n char* agentNames[] = {\"\"\")\n for ag_name in pcol.B:\n fout.write(\"\"\"[AGENT_%s] = \"%s\", \"\"\" % (ag_name.upper(), ag_name))\n fout.write(\"\"\"};\n#endif\n\n//the smallest kilo_uid from the swarm\nconst uint16_t smallest_robot_uid = %d;\n//the number of robots that make up the swarm\nconst uint16_t nr_swarm_robots = %d;\n\nvoid lulu_init(Pcolony_t *pcol) {\"\"\" % (smallest_robot_id, nr_robots) )\n\n # call initPcolony()\n fout.write(\"\"\"\\n //init Pcolony with alphabet size = %d, nr of agents = %d, capacity = %d\n initPcolony(pcol, %d, %d, %d);\"\"\" % (len(pcol.A), len(pcol.B), pcol.n, len(pcol.A), len(pcol.B), pcol.n))\n fout.write(\"\"\"\\n //Pcolony.alphabet = %s\"\"\" % pcol.A)\n\n # init environment\n fout.write(\"\"\"\\n\\n //init environment\"\"\")\n counter = 0;\n for obj, nr in pcol.env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->env.items[%d].nr = %d;\\n\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init INPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.in_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.in_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init INPUT global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init OUTPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.out_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.out_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n 
pcol->pswarm.out_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init OUTPUT global pswarm environment\"\"\")\n\n for ag_name in pcol.B:\n fout.write(\"\"\"\\n\\n //init agent %s\"\"\" % ag_name)\n #fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), len(pcol.agents[ag_name].programs)))\n fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), getNrOfProgramsAfterExpansion(pcol.agents[ag_name], nr_robots- 1)))\n\n fout.write(\"\"\"\\n //init obj multiset\"\"\")\n counter = 0;\n for obj, nr in pcol.agents[ag_name].obj.items():\n #replace %id and * with $id and $ respectively\n\n for i in range(nr):\n fout.write(\"\"\"\\n pcol->agents[AGENT_%s].obj.items[%d] = OBJECT_ID_%s;\"\"\" % (ag_name.upper(), counter, obj.upper()))\n counter += 1\n\n fout.write(\"\"\"\\n\\n //init programs\"\"\")\n for prg_nr, prg in enumerate(pcol.agents[ag_name].programs):\n fout.write(\"\"\"\\n\\n initProgram(&pcol->agents[AGENT_%s].programs[%d], %d);\"\"\" % (ag_name.upper(), prg_nr, getNrOfRulesWithoutRepetitions(prg)))\n fout.write(\"\"\"\\n //init program %d: < %s >\"\"\" % (prg_nr, prg.print()))\n\n rule_index = 0\n for rule_nr, rule in enumerate(prg):\n # skip rules that contain identical operands and thus have no effect\n if (rule.lhs == rule.rhs and rule.lhs == 'e' and rule.main_type != sim.RuleType.conditional):\n continue\n\n fout.write(\"\"\"\\n //init rule %d: %s\"\"\" % (rule_nr, rule.print(toString=True)) )\n if (rule.main_type != sim.RuleType.conditional):\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_%s, OBJECT_ID_%s, OBJECT_ID_%s, NO_OBJECT, NO_OBJECT);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.lhs.upper(), rule.rhs.upper()))\n else:\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_CONDITIONAL_%s_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.alt_type.name.upper(), rule.lhs.upper(), rule.rhs.upper(), rule.alt_lhs.upper(), rule.alt_rhs.upper()))\n\n #increase rule_index\n rule_index += 1\n fout.write(\"\"\"\\n //end init program %d\n pcol->agents[AGENT_%s].init_program_nr++;\"\"\" % (prg_nr, ag_name.upper()))\n fout.write(\"\"\"\\n //end init programs\"\"\")\n\n fout.write(\"\"\"\\n //end init agent %s\"\"\" % ag_name)\n\n fout.write(\"\"\"\\n}\"\"\")\n fout.write(\"\"\"\\n\\nvoid lulu_destroy(Pcolony_t *pcol) {\n //destroys all of the subcomponents\n destroyPcolony(pcol);\n}\"\"\")\n fout.write(\"\"\"\\n\n#ifdef NEEDING_WILDCARD_EXPANSION\nuint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id) {\n //used for a cleaner iteration through the P colony\n //instead of using agents[i] all of the time, we use just agent\n Agent_t *agent;\n\"\"\")\n\n fout.write(\"\"\"\\n uint8_t obj_with_id[] = {\"\"\")\n obj_with_id_size = 0\n for obj in pcol.A:\n if (\"_W_ID\" in obj):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n obj_with_id_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_id_size = %d;\"\"\" % (obj_with_id_size))\n\n fout.write(\"\"\"\\n uint8_t obj_with_any[] = {\"\"\")\n obj_with_any_size = 0\n is_obj_with_any_followed_by_id = []\n for i, obj in enumerate(pcol.A):\n if (obj.endswith(\"_W_ALL\")):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n # if we are at least 2 objects before the end of the list\n if (i < len(pcol.A) - 1):\n # check if this _$ 
wildcarded object is followed by a _$id object\n if (\"_W_ID\" in pcol.A[i+1]):\n is_obj_with_any_followed_by_id.append(1)\n else:\n is_obj_with_any_followed_by_id.append(0)\n else:\n # this (_$) object is the last one in the list\n is_obj_with_any_followed_by_id.append(0)\n obj_with_any_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_any_size = %d;\n uint8_t is_obj_with_any_followed_by_id[] = {%s};\"\"\" % (obj_with_any_size,\n str(is_obj_with_any_followed_by_id).replace(\"[\", \"\").replace(\"]\", \"\")))\n\n fout.write(\"\"\"\\n\\n uint16_t my_symbolic_id = my_id - smallest_robot_uid;\n\n //replace W_ID wildcarded objects with the object corresponding to the symbolic id\n // e.g.: B_W_ID -> B_0 for my_symbolic_id = 0\n replacePcolonyWildID(pcol, obj_with_id, obj_with_id_size, my_symbolic_id);\n\n //expand each obj_with_any[] element into nr_swarm_robots objects except my_symbolic id.\n // e.g.: B_W_ALL -> B_0, B_2 for nr_swarm_robots = 3 and my_symbolic_id = 1\n expandPcolonyWildAny(pcol, obj_with_any, is_obj_with_any_followed_by_id, obj_with_any_size, my_symbolic_id, nr_swarm_robots);\n\n return my_symbolic_id;\n}\n#endif\"\"\")", "def beta_gen_posmnt(p):\n return np.array([0.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))", "def __init__(self, **kwargs):\n\n polymer_type = \"PE\"\n helice = Helice()\n num_monomers = 30\n tacticity = \"\"\n chiriality = \"\"\n head_tail_defect_ratio = 0\n configs = 30\n infinite = False\n\n for key in kwargs:\n if key == \"polymer_type\":\n polymer_type = kwargs[\"polymer_type\"]\n elif key == \"helice\":\n helice = kwargs[\"helice\"]\n elif key == \"num_monomers\":\n num_monomers = kwargs[\"num_monomers\"]\n if is_integer_num(num_monomers):\n if num_monomers < 1:\n raise ValueError(\n \"Number of monomers should be equal or larger than 1\"\n )\n else:\n raise ValueError(\"Number of monomers should be an integer\")\n elif key == \"tacticity\":\n tacticity = kwargs[\"tacticity\"]\n elif key == \"chiriality\":\n chiriality = kwargs[\"chiriality\"]\n elif key == \"head_tail_defect_ratio\":\n head_tail_defect_ratio = kwargs[\"head_tail_defect_ratio\"]\n elif key == \"configs\":\n configs = kwargs[\"configs\"]\n elif key == \"infinite\":\n infinite = kwargs[\"infinite\"]\n else:\n raise KeyError(\n \"Unknown input %s for Chain class\\n Please see help for more information\"\n % key\n )\n\n if polymer_type not in polymer_types:\n raise ValueError(\n polymer_type\n + \" do not exist in our library, please consider using custom feature\"\n )\n self.polymer_type = polymer_types[polymer_type]\n\n if self.polymer_type.helicity:\n self.custom = 0\n else:\n self.custom = 1\n\n if self.custom:\n print(\"Warning: Custom type, only read helice motifs and turns info\")\n self.helice = helice\n\n if not 0 <= (head_tail_defect_ratio) <= 1:\n raise ValueError(\n \"Defect ratio of head to head and tail to tail connections is\",\n head_tail_defect_ratio,\n \"and should be in the range of [0,1]\",\n )\n self.head_tail_defect_ratio = head_tail_defect_ratio\n\n self.unit_num_monomers = 1\n if \"num_monomers\" not in kwargs:\n if infinite:\n num_monomers = 2\n else:\n num_monomers = 1\n\n self.num_monomers = num_monomers\n\n self.tacticity = tacticity\n if self.tacticity:\n if self.tacticity == \"N/A\":\n self.tacticity = \"\"\n else:\n print(\"Warning: Custom type does not have tacticity\")\n self.tacticity = \"\"\n\n self.chiriality = chiriality\n if self.chiriality:\n if self.chiriality == \"N/A\":\n self.chiriality = \"\"\n else:\n print(\"Warning: Custom type does not have 
chiriality\")\n self.chiriality = \"\"\n\n self.infinite = infinite\n\n else:\n monomer_backbone_atoms = len(self.polymer_type.backbone_atoms)\n\n if helice.atoms % monomer_backbone_atoms:\n raise Exception(\n \"Number of backbone atoms in a motif must be multiple of number of monomer backbone atoms %d\\n\"\n % monomer_backbone_atoms\n )\n if tacticity == \"syndiotactic\":\n multiple = int(monomer_backbone_atoms * 2 / helice.atoms)\n if (multiple * helice.atoms) % (monomer_backbone_atoms * 2):\n raise Exception(\n \"Number of backbone atoms in a motif for syndiotactic configuration must be multiple of twice of \\\n the number of monomer backbone atoms %d\\n\"\n % monomer_backbone_atoms\n * 2\n )\n elif multiple != 1:\n print(\n \"Number of backbone atoms in a motif for syndiotactic configuration should be multiple of twice \\\n of the number of monomer backbone atoms %d\\n\"\n % (monomer_backbone_atoms * 2)\n )\n print(\n \"Trying Helice_%d_%d_%d...\"\n % (\n helice.atoms * multiple,\n helice.motifs,\n helice.turns * multiple,\n )\n )\n helice = Helice(\n helice.atoms * multiple, helice.motifs, helice.turns * multiple\n )\n # else:\n # if monomer_backbone_atoms != helice.atoms:\n # raise ValueError(\"Number of backbone atoms in a motif must be %d\" % helice.atoms)\n helice_backbone_atoms = helice.atoms * helice.motifs\n self.helice = helice\n\n if not 0 <= (head_tail_defect_ratio) <= 1:\n raise ValueError(\n \"Defect ratio of head to head and tail to tail connections is\",\n head_tail_defect_ratio,\n \"and should be in the range of [0,1]\",\n )\n self.head_tail_defect_ratio = head_tail_defect_ratio\n\n self.unit_num_monomers = int(helice_backbone_atoms / monomer_backbone_atoms)\n if \"num_monomers\" not in kwargs:\n if infinite:\n if tacticity == \"atactic\" or head_tail_defect_ratio:\n num_monomers = 10 * self.unit_num_monomers\n elif helice_backbone_atoms > 2:\n num_monomers = self.unit_num_monomers\n else:\n num_monomers = 2\n\n if num_monomers < self.unit_num_monomers:\n raise ValueError(\n \"Number of monomers should be equal or larger than %d in order to generate Helice_%s chain.\\nCurrent \\\n number of monomers is %d\"\n % (self.unit_num_monomers, helice, num_monomers)\n )\n\n if infinite:\n if num_monomers % self.unit_num_monomers:\n raise ValueError(\n \"Number of monomers should be multiple of %d in order to generate infinite periodic Helice_%s \\\n chain.\\nCurrent number of monomers is %d\"\n % (self.unit_num_monomers, helice, num_monomers)\n )\n elif num_monomers * monomer_backbone_atoms < 3:\n raise ValueError(\n \"Number of backbone atoms should be more than 2 in order to create infinite periodic \\\n chain.\\nCurrent number of backbone atoms along the periodic chain is %d\\nPlease increate \\\n number of monomers.\"\n % (num_monomers * monomer_backbone_atoms)\n )\n self.num_monomers = num_monomers + 2 if infinite else num_monomers\n\n self.tacticity = tacticity\n if self.tacticity:\n if self.tacticity == \"N/A\":\n self.tacticity = \"\"\n elif self.tacticity not in [\"isotactic\", \"atactic\", \"syndiotactic\"]:\n raise TypeError(\n \"Unknown tacticity, please specify one of the following: isotactic, atactic and syndiotactic\"\n )\n elif not self.polymer_type.side_atom:\n raise ValueError(\"Please specify side_atom\")\n\n self.chiriality = chiriality\n if str(self.helice) in [\"2_1_1\", \"4_1_2\"]:\n self.torsion_seq = [180, 180, 180, 180]\n if self.chiriality:\n self.chiriality = \"\"\n print(\"Zig-zag conformation does not have chiriality\")\n elif str(self.helice) in 
[\"2_2_1\", \"4_2_2\"]:\n if self.chiriality == \"left\":\n self.torsion_seq = [300, 300, 300, 300]\n elif self.chiriality == \"right\":\n self.torsion_seq = [60, 60, 60, 60]\n else:\n raise ValueError(\"Please specify chiriality: left or right\")\n elif str(self.helice) in [\"2_3_1\", \"4_3_2\"]:\n if self.chiriality == \"left\":\n self.torsion_seq = [180, 300, 180, 300]\n elif self.chiriality == \"right\":\n self.torsion_seq = [60, 180, 60, 180]\n else:\n raise ValueError(\"Please specify chiriality: left or right\")\n elif str(self.helice) == \"4_1_1\":\n self.torsion_seq = [60, 180, 300, 180]\n if self.chiriality:\n self.chiriality = \"\"\n print(\"Helice_4_1_1 conformation does not have chiriality\")\n elif str(self.helice) == \"4_2_1\":\n if self.chiriality == \"left\":\n self.torsion_seq = [180, 180, 300, 300]\n elif self.chiriality == \"right\":\n self.torsion_seq = [60, 60, 180, 180]\n else:\n raise ValueError(\"Please specify chiriality: left or right\")\n elif str(self.helice) == \"4_3_1\":\n if self.chiriality == \"left\":\n if self.helice.sub_type:\n self.torsion_seq = [180, 300, 300, 300]\n else:\n self.torsion_seq = [180, 180, 180, 300]\n elif self.chiriality == \"right\":\n if self.helice.sub_type:\n self.torsion_seq = [60, 60, 60, 180]\n else:\n self.torsion_seq = [60, 180, 180, 180]\n else:\n raise ValueError(\"Please specify chiriality: left or right\")\n else:\n raise Exception(\"Helice_%s is currently not supported\" % self.helice)\n\n self.configs = configs\n self.infinite = infinite\n # self.pattern = 0\n self.monomers = []\n self.weights = {}", "def setParams(self, p = 2):\n self.p = p\n self.l = p - 1\n self.id_ntot = {}\n self.id_y = {}\n self.id_W = {}\n self.id_X = {}\n for i in self.uniids:\n tracker = (self.data['id'] == i)\n self.id_ntot.update({i: np.sum(tracker)})\n self.id_y.update({i:\n self.data['weight'][tracker].reshape(np.sum(tracker), 1)})\n self.id_W.update({i: self._designMatrix_(p, tracker)})\n self.id_X.update({i:\n self._designMatrix_(self.l+1,tracker,is_X=True)})\n self.id_Z = self.id_W.copy()" ]
[ "0.5515874", "0.5445426", "0.5421652", "0.54088384", "0.5321685", "0.5316981", "0.5300015", "0.52771187", "0.52574724", "0.5255685", "0.52163506", "0.5180207", "0.5175576", "0.51701", "0.5159775", "0.51574296", "0.51267034", "0.51238847", "0.5120029", "0.5111619", "0.5101273", "0.50828934", "0.5080192", "0.5072543", "0.5065332", "0.5045554", "0.50391465", "0.5030391", "0.5012922", "0.5011559", "0.5011095", "0.50067556", "0.49796143", "0.49758857", "0.4970083", "0.4961774", "0.4958691", "0.49586606", "0.49550942", "0.49525827", "0.49477443", "0.49435037", "0.49290463", "0.49210644", "0.4919619", "0.49147266", "0.4914596", "0.49024618", "0.48994908", "0.4886655", "0.48838353", "0.4868877", "0.4858821", "0.48568016", "0.48529977", "0.4844914", "0.484398", "0.48406738", "0.4839605", "0.48354116", "0.4833792", "0.48292497", "0.48259723", "0.48232043", "0.4816639", "0.481617", "0.48088157", "0.48087105", "0.48048434", "0.48046497", "0.4803316", "0.47981164", "0.47971523", "0.4795602", "0.47943917", "0.4790233", "0.47826344", "0.47785676", "0.47776157", "0.47760716", "0.4770654", "0.47703958", "0.47699156", "0.47660375", "0.47658548", "0.47647807", "0.47644234", "0.4762058", "0.47607154", "0.4752488", "0.4751177", "0.4749186", "0.4744832", "0.47448063", "0.47441065", "0.47384953", "0.47362798", "0.47337383", "0.47332063", "0.4732775" ]
0.77108574
0
Set the EMAN2 CTF object in the header of the input image using the values of the CTF parameters given in the list p.
def set_ctf(ima, p):
	from utilities import generate_ctf
	ctf = generate_ctf( p )
	ima.set_attr( "ctf", ctf )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_ctf(p):\n\tfrom EMAN2 import EMAN2Ctf\n\n\tdefocus = p[0]\n\tcs = p[1]\n\tvoltage = p[2]\n\tpixel_size = p[3]\n\tbfactor = p[4]\n\tamp_contrast = p[5]\n\t\n\tif defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention\n\t\tdefocus *= 1e-4\n\t\n\tif amp_contrast < 1.0:\n\t\tfrom math import sqrt\n\t\tamp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)\n\n\tctf = EMAN2Ctf()\n\tif(len(p) == 6):\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast})\n\telse:\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast,'dfdiff':p[6],'dfang':p[7]})\n\t\t\n\treturn ctf", "def setheaders(f):\n f.headers['OBSERVER'] = \"'%s'\" % camera.status.observer\n f.headers['FILTERID'] = \"'%s'\" % filtname(camera.status.filter)\n f.headers['FILTER'] = \"%1d\" % camera.status.filter\n f.headers['XYSTAGE'] = \"'%d,%d'\" % camera.status.guider\n f.headers['MIRROR'] = \"'%s'\" % camera.status.mirror\n if camera.status.imgtype == 'BIAS':\n f.headers['BIAS'] = camera.status.object\n elif camera.status.imgtype == 'DARK':\n f.headers['DARK'] = camera.status.object\n else:\n f.headers['OBJECT'] = camera.status.object\n try:\n skytemp = weather.status.skytemp\n f.headers['SKYTEMP'] = \"%4.1f\" % skytemp\n f.comments['SKYTEMP'] = \"'Infrared sky temp in degC'\"\n except:\n pass\n\n try:\n if not camera.status.TJ.current.posviolate: #Position calibrated to epoch\n ra = camera.status.TJ.current.Ra/15/3600\n dec = camera.status.TJ.current.Dec/3600\n epoch = camera.status.TJ.current.Epoch\n alt = camera.status.TJ.current.Alt\n GotTJ = True\n elif camera.status.TJ.current.RaC:\n ra = camera.status.TJ.current.RaC\n dec = camera.status.TJ.current.DecC\n alt = camera.status.TJ.current.Alt\n t = time.gmtime()\n epoch = t.tm_year + (t.tm_yday/366.0)\n GotTJ = True\n else:\n GotTJ = False\n except AttributeError:\n GotTJ = False \n if GotTJ:\n f.headers['RA_OBJ'] = \"%12.9f\" % (ra*15.0)\n f.headers['RA'] = \"'%s'\" % sexstring(ra)\n f.headers['DEC_OBJ'] = \"%13.9f\" % dec\n f.headers['DEC'] = \"'%s'\" % sexstring(dec)\n f.headers['EQUINOX'] = \"%6.1f\" % epoch\n f.headers['SECZ'] = \"%6.3f\" % (1/math.cos((90-alt)*math.pi/180))\n if GotFT:\n hjd,message = fitstime.findtime(fimage=f, verbose=0, allfields=0)\n if type(hjd) == float:\n f.headers['HJD'] = \"%f\" % hjd\n f.comments['HJD'] = \"Heliocentric Julian Day at exposure midpoint\"", "def __call__(cls, nir_paw_image_fname, nir_paw_conf_fname, output_template, conf_limit):\n\n# on with the show\n logger.info('Opening science and confidence frames')\n ifits=fitsio.FITS(nir_paw_image_fname,'r')\n cfits=fitsio.FITS(nir_paw_conf_fname,'r')\n\n#\n# Check that the number of HDUs match\n#\n\n if (len(ifits) != len(cfits)):\n print(\"Number of HDUs/extensions in IMAGE and CONFidence files do not match.\")\n print(\"Aborting\")\n exit(1)\n\n p_ih=ifits[0].read_header()\n p_ch=cfits[0].read_header()\n# Remove reserve keywords\n p_ih.clean()\n\n#\n# Extract some keywords from PRIMARY header to propagate into the individual images.\n#\n base_dict={}\n base_header=[]\n for hkeep in nci.nir_paw_primary_keep:\n if (hkeep in p_ih):\n base_header.append({'name':hkeep,'value':p_ih[hkeep],'comment':p_ih.get_comment(hkeep)})\n base_dict[hkeep]={'value':p_ih[hkeep],'comment':p_ih.get_comment(hkeep)}\n else:\n print(\"Keyword {:s} missing in 
HDU[{:d}]\".format(hkeep,0))\n#\n# If possible, need too keep track of REQTIME (requested frametime) because sometimes \n# EXPTIME seems to be mispopulated in the CCD image HDUs with TEXPTIME\n#\n if ('TEXPTIME' in p_ih):\n texptime=p_ih['TEXPTIME']\n else:\n texptime=None\n if ('REQTIME' in p_ih):\n reqtime=p_ih['REQTIME']\n else:\n reqtime=None\n#\n# print(base_header)\n \n\n#\n# Step through HDUs... and form \"CCD\" images for each HDU\n#\n ExtList=[]\n for hnum in range(1,len(ifits)):\n print(\"############ Begin work on extnum={:d} ###############\".format(hnum))\n\n# Check that extensions match (after that concentrate on image).\n print(hnum,ifits[hnum].get_extname(),cfits[hnum].get_extname())\n if (ifits[hnum].get_extname() != cfits[hnum].get_extname()):\n print(\"Working on extension {:d}. Extension names (image,conf) of ([{:s}],[{:s}]) do not match!\".format(\n hnum,ifits[hnum].get_extname(),cfits[hnum].get_extname()))\n print(\"Aborting!\")\n exit(1)\n\n f_ih=ifits[hnum].read_header()\n f_ih.clean()\n#\n# Fix occurences where the CCD-level keyword EXPTIME has inherited the value of TEXPTIME\n#\n exptime=f_ih['EXPTIME']\n if (reqtime is not None):\n if (exptime > reqtime):\n print(\"Warning: possible corrupt EXPTIME (total exptime rather than frame time present).\")\n print(\"Attempting to update EXTIME to REQTIME (requested frame time).\")\n print(\" Primary HDU: TEXPTIME: {:}\".format(texptime))\n print(\" Primary HDU: REQTIME: {:}\".format(reqtime))\n print(\" Current HDU: EXPTIME: {:} --> {:}\".format(exptime,reqtime))\n exptime=reqtime\n f_ih['EXPTIME']=reqtime\n#\n# Augment keywords pulled from primary header with keywords from current HDU\n#\n c_header=base_header[:]\n c_dict=dict(base_dict)\n for hkeep in nci.nir_paw_hdu_keep:\n if (hkeep in f_ih):\n# print(hkeep,f_ih[hkeep],f_ih.get_comment(hkeep))\n c_header.append({'name':hkeep,'value':f_ih[hkeep],'comment':f_ih.get_comment(hkeep)})\n if (hkeep in c_dict):\n print(\"Warning: Replacing keyword {:s} with value from hdu={:d}\".format(hkeep,hnum))\n c_dict[hkeep]={'value':f_ih[hkeep],'comment':f_ih.get_comment(hkeep)}\n else:\n print(\"Keyword {:s} missing in HDU[{:d}]\".format(hkeep,hnum))\n\n#\n# Get the CCDNUM from special keyword and propagate\n# Get SKYLEVEL, SKYNOISE, ZEROPOINT and form basis value for the weight plane\n#\n ccdnum=f_ih['HIERARCH ESO DET CHIP NO']\n c_header.append({'name':'CCDNUM','value':ccdnum,'comment':'Unique Detector Number'})\n\n# exptime=f_ih['EXPTIME']\n## Fix occurences where the CCD-level keyword EXPTIME has inherited the value of TEXPTIME\n# if (exptime > reqtime):\n# print(\"Warning: possible corrupt EXPTIME (total exptime rather than frame time present).\")\n# print(\"Attempting to update EXTIME to REQTIME (requested frame time).\")\n# print(\" Primary HDU: TEXPTIME: {:.2f}\".format(texptime))\n# print(\" Primary HDU: REQTIME: {:.2f}\".format(reqtime))\n# print(\" Current HDU: EXPTIME: {:.2f} --> {:.2f}\".format(exptime,reqtime))\n# exptime=reqtime\n# f_ih['EXPTIME']=reqtime\n\n mtime=2.5*np.log10(exptime)\n skylev=f_ih['SKYLEVEL']\n skyrms=f_ih['SKYNOISE']\n seeing=f_ih['SEEING']\n magzpt=f_ih['MAGZPT']\n\n# zeropoint include a correction from VEGA->AB\n# zeropoint in headers was found to have a factor for EXPTIME removed (have to add back in for DES-like processing)\n\n if (p_ih['BAND'] in nci.nir_vega_to_ab):\n magzpt=magzpt+nci.nir_vega_to_ab[p_ih['BAND']]+mtime\n else:\n print(\"Warning! 
Unknown BAND ({:s}) for conversion of zeropoint from VEGA to AB system\".format(p_ih['BAND']))\n\n c_header.append({'name':'SKYBRITE', 'value':skylev, 'comment':'Sky level estimate from IMCORE'})\n c_header.append({'name':'SKYSIGMA', 'value':skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'SKYVARA', 'value':skyrms*skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'SKYVARB', 'value':skyrms*skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'FWHM', 'value':seeing, 'comment':'Average FWHM (pixels)'})\n c_header.append({'name':'MAG_ZERO', 'value':magzpt, 'comment':'Converted MAGZPT(Vega) to AB system'})\n nite_val=convert_utc_str_to_nite(f_ih['DATE-OBS'])\n c_header.append({'name':'NITE', 'value':nite_val, 'comment':'Observation Nite'})\n c_header.append({'name':'SATURATE', 'value':nci.nircam_satval[ccdnum], 'comment': 'Saturation Level (ADU)'})\n c_header.append({'name':'PIXSCAL1', 'value':0.341, 'comment': 'Fiducial pixel scale (arcsec/pix)'})\n c_header.append({'name':'PIXSCAL2', 'value':0.341, 'comment': 'Fiducial pixel scale (arcsec/pix)'})\n\n# bval=f_ih['BSCALE']\n# print(\"BSCALE was: \",bval)\n print(\"SKYLEVEL was: \",skylev)\n print(\"SKYRMS was: \",skyrms)\n#\n# Searching for a proper WGT prescription\n#\n# This was what I took to be equivalent to DES (but perhaps it does not properly factor in N-image stack\n# wgtval=skylev+(skyrms*skyrms)\n print(\"SKYLEV + (SKYRMS*SKYRMS): \",skylev+(skyrms*skyrms))\n#\n# This was assuming SKYLEVEL does not properly inform stats\n# wgtval=(skyrms*skyrms)\n print(\"(SKYRMS*SKYRMS): \",skyrms*skyrms)\n\n#\n# Read the image data from the science and confidence files.\n#\n sci_data=ifits[hnum].read()\n print(\"Median of data {:.3f} \".format(np.median(sci_data)))\n conf_data=cfits[hnum].read()\n\n#\n# Better seemed to be a re-measurement of STD\n#\n print(\"Attempting an improved SKYRMS with 3-sigma clip to remove objects\")\n avgval, medval, stdval = medclip(sci_data,verbose=3)\n# print(avgval,medval,stdval)\n print(\"stdval^2: \",stdval*stdval)\n wgtval=(stdval*stdval)\n# print(wgtval)\n#\n# Use the new (i.e. 
chip-based header) to feed a WCS \n# Use image size to feed calculations for center and corners (similar to despyastro.CCD_corners\n#\n print(\"Calculating center/corners assuuming native ZPN projection\")\n w=WCS(fitsio.FITSHDR(c_header))\n\n fnax2=float(sci_data.shape[0])\n fnax1=float(sci_data.shape[1])\n corn_x=np.array([fnax1/2.0,1.,fnax1,fnax1,1.])\n corn_y=np.array([fnax2/2.0,1.,1.,fnax2,fnax2])\n sky = w.pixel_to_world(corn_x,corn_y)\n corn_ra=sky.ra.degree\n corn_dec=sky.dec.degree\n\n c_header.append({'name':'RA_CENT', 'value':corn_ra[0], 'comment':'RA center'})\n c_header.append({'name':'DEC_CENT','value':corn_dec[0],'comment':'DEC center'})\n for i in range(1,5):\n c_header.append({'name':'RAC{:d}'.format(i), 'value':corn_ra[i], 'comment':'RA corner {:d}'.format(i)})\n c_header.append({'name':'DECC{:d}'.format(i),'value':corn_dec[i],'comment':'DEC corner {:d}'.format(i)})\n RACMIN, RACMAX, DECCMIN, DECCMAX, CROSSRA0 = get_DESDM_corners_extent(corn_ra, corn_dec)\n c_header.append({'name':'RACMIN', 'value':RACMIN, 'comment':'Minimum extent of image in RA'})\n c_header.append({'name':'RACMAX', 'value':RACMAX, 'comment':'Maximum extent of image in RA'})\n c_header.append({'name':'DECCMIN', 'value':DECCMIN, 'comment':'Minimum extent of image in Declination'})\n c_header.append({'name':'DECCMAX', 'value':DECCMAX, 'comment':'Maximum extent of image in Declination'})\n c_header.append({'name':'CROSSRA0','value':CROSSRA0,'comment':'Does Image Span RA 0h (Y/N)'})\n c_header.append({'name':'DESEPOCH','value':'NIREPOCH','comment':'Default DES epoch definition for including NIR data'})\n#\n#\n#\n print(\"Stripping ZPN projection from WCS and creating a shift to get a rough TAN\")\n recs_to_delete=[] \n for i, hrec in enumerate(c_header):\n if (hrec['name'] == 'CTYPE1'):\n c_header[i]['value']='RA---TAN'\n if (hrec['name'] == 'CTYPE2'):\n c_header[i]['value']='DEC--TAN'\n\n if (hrec['name'] == 'CRVAL1'):\n c_header[i]['value']=corn_ra[0]\n if (hrec['name'] == 'CRVAL2'):\n c_header[i]['value']=corn_dec[0]\n if (hrec['name'] == 'CRPIX1'):\n c_header[i]['value']=fnax1/2.0\n if (hrec['name'] == 'CRPIX2'):\n c_header[i]['value']=fnax2/2.0\n\n if (hrec['name'] in ['PV2_1','PV2_2','PV2_3','PV2_4','PV2_5']):\n recs_to_delete.append(i)\n if (len(recs_to_delete) > 0):\n for i in sorted(recs_to_delete,reverse=True):\n x=c_header.pop(i)\n print(\"Removing: {:}\".format(x))\n\n whack=WCS(fitsio.FITSHDR(c_header))\n skyhack = whack.pixel_to_world(corn_x,corn_y)\n whack_corn_ra=skyhack.ra.degree\n whack_corn_dec=skyhack.dec.degree\n for i in range(5):\n cosdec=np.cos(corn_dec[i]*np.pi/180.)\n dra=3600.*(corn_ra[i]-whack_corn_ra[i])*cosdec\n ddec=3600.*(corn_dec[i]-whack_corn_dec[i])\n print(\" WCS shift {:d} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} \".format(ccdnum,corn_ra[i],corn_dec[i],whack_corn_ra[i],whack_corn_dec[i],dra,ddec))\n\n# for i, hrec in enumerate(c_header):\n# print(i,hrec)\n\n#\n# Form the SCI, MSK, and WGT HDUs\n#\n im=DESImage(init_data=True,init_mask=True,init_weight=True,shape=sci_data.shape)\n\n im.data=np.float32(sci_data)\n msk_wsm=np.where(conf_data<conf_limit)\n im.mask[msk_wsm] |= BADPIX_BPM\n im.weight=np.float32(conf_data/100./wgtval)\n#\n# Check for extra conditions where further masking is needed\n# Here is where CCD=6 check was started (now removed and placed \n# in nir_starmask to take advantage of bright object masking\n#\n\n\n#\n# Deal with individual header-isms and write out SCI, MSK, WGT\n# Note this is using fitsio (duplicating some of the DESIMAGE.save \n# but 
customization was needed to deal with foibles of the current\n#\n fname=re.sub('%02d','{:02d}'.format(ccdnum),output_template,1)\n ofits = fitsio.FITS(fname, 'rw', clobber=True)\n\n im.header=fitsio.FITSHDR(c_header) \n im.header['DES_EXT']='IMAGE'\n im.header = update_hdr_compression(im.header, 'SCI')\n ofits.write(im.data,extname='SCI',header=im.header)\n\n\n im.mask_hdr=fitsio.FITSHDR(c_header) \n im.mask_hdr['DES_EXT']='MASK'\n im.mask_hdr = update_hdr_compression(im.mask_hdr, 'MSK')\n im.mask_hdr['DES_EXT']='MASK'\n ofits.write(im.mask,extname='MSK',header=im.mask_hdr)\n\n# im.weight_hdr=fitsio.FITSHDR(c_header) \n# print(im.weight_hdr)\n im.weight_hdr = update_hdr_compression(im.weight_hdr, 'WGT')\n# print(im.weight_hdr)\n im.weight_hdr['DES_EXT']='WEIGHT'\n ofits.write(im.weight,extname='WGT',header=im.weight_hdr)\n\n ofits.close()\n print(\"Wrote {:s}\".format(fname))\n print(\" \")\n \n\n ifits.close()\n cfits.close()\n\n ret_code = 0\n return ret_code", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def processCerFile(k, fb, newcorex=None, newcorey=None, sz=0):\n\n #---- Read Event Header\n evth = unpack('{}f'.format(evthSize), fb.read(evthSize * wordSize))\n #print(evth)\n\n primary = get_primary(evth)\n energy = get_energy(evth)\n height = get_height_first(evth)\n thetaEvtH, phiEvtH = get_direction(evth)\n coreX, coreY, coreD = get_core(evth)\n\n print('{:4d} {:3d} {:9d} {:6.1f} {:8.1f} {:7.1f} {:7.1f} {:8.1f} {:5.1f} {:5.1f}'\n .format(k, int(primary), sz, energy, height, coreX, coreY, coreD, thetaEvtH, phiEvtH))\n\n return\n\n #---- Read Cherenkov photons from file\n\n wl = 999.\n i = 0\n\n while wl > 0.5:\n cphotonData = fb.read(cphotonSize * wordSize)\n \n i = i + 1\n wl, x, y, u, v, t, h = unpack('{}f'.format(cphotonSize), cphotonData)\n w = sqrt(1.0 - u ** 2 - v ** 2)\n \n if wl < 1.:\n continue\n\n wl = wl - 101000.\n\n print('{} {} {:.2f} {:.2f} {:.2f} {:.6f} {:.6f} {:.6f} {:.8f} {:.2f}'\n .format(k, i, wl, x, y, u, v, w, t, h))", "def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 
'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2", "def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n 
headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: {nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is not in the same directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= 
ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def eff_param():\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)", "def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang", "def update_header(arr_imgs,obj,filter_i):\n \n for img in arr_imgs:\n warnings.simplefilter('ignore', category=AstropyUserWarning)\n try:\n hdulist = fits.open(img,ignore_missing_end=True)\n #if there is only a primary header get the data from it\n if len(hdulist) == 1:\n data = getdata(img, 0, header=False)\n #if there is more than one header get data from the 'SCI' extension\n else:\n data = getdata(img, 1, header=False)\n #Get value of EXPTIME and PHOTZPT keyword from primary header and \n #set CCDGAIN to a default value of 1\n EXPTIME = hdulist[0].header['EXPTIME']\n PHOTFLAM = hdulist[1].header['PHOTFLAM']\n PHOTZPT = hdulist[1].header['PHOTZPT']\n CCDGAIN = 1.0\n #First pass locating value for gain\n for i in range(2):\n if len(hdulist) == 1:\n break\n #Go through primary and secondary header and ignore the \n #BinTable formatted header\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['CCDGAIN']\n break\n if 'GAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['GAIN']\n break\n if 'ATODGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['ATODGAIN']\n break\n \n #Locating units of image\n print('Doing BUNIT check')\n for i in range(2):\n #If there is only one header then this is the only place to \n #check\n if len(hdulist) == 1:\n bunit = hdulist[0].header['D001OUUN']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'counts':\n ### Rescaling zeropoint\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n hdulist[0].header.set('BUNIT','COUNTS/S')\n hdulist[0].header.set('MAGZPT',ZPT_NEW)\n print('BUNIT is {0}'.format(hdulist[0].\\\n header['BUNIT']))\n \n #If there are multiple headers then they all have to be checked\n else:\n if 'BUNIT' in hdulist[i].header:\n bunit = hdulist[i].header['BUNIT']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'COUNTS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n if bunit == 'ELECTRONS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN*EXPTIME) \\\n + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/(CCDGAIN*EXPTIME))*pixmod\n if bunit == 'ELECTRONS/S':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n if bunit == 'ELECTRONS/SEC':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = 
(data/CCDGAIN)*pixmod\n hdulist[i].header['BUNIT'] = 'COUNTS/S'\n hdulist[i].header['MAGZPT'] = ZPT_NEW\n ###\n print('BUNIT is {0}'.format(hdulist[i].\\\n header['BUNIT']))\n print('PHOTZPT is {0}'.format(hdulist[i].\\\n header['MAGZPT']))\n print('Done changing BUNIT')\n \n #Second pass to assign gain and exptime to headers\n for i in range(2):\n if len(hdulist) == 1:\n break\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' not in hdulist[i].header:\n hdulist[i].header.set('CCDGAIN',CCDGAIN)\n if 'EXPTIME' not in hdulist[i].header:\n hdulist[i].header.set('EXPTIME',EXPTIME)\n \n #Make new versions of images in interim/obj1 folder\n os.chdir(path_to_interim + obj)\n #Remove .fits extension\n img = os.path.splitext(img)[0]\n #If there was only one header write that header's data to new\n #version of fits image\n if len(hdulist) == 1:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[0].\\\n header,output_verify='ignore')\n #Else write the 'SCI' header's data to new version of fits image\n else:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[1].\\\n header,output_verify='ignore')\n hdulist.close()\n os.chdir(path_to_raw + obj)\n \n #This is to catch 'empty or corrupt FITS file' or any other IOError\n #and write it to a text file along with the object name and the \n #filter name\n except IOError as e:\n os.chdir('..')\n dir_path = os.getcwd()\n if os.path.basename(dir_path) == 'raw':\n os.chdir(path_to_interim)\n with open('Error_swarp.txt','a') as newfile: \n newfile.write('Object {0} and image {1} raises {2}'.\\\n format(obj,img,e))\n newfile.write('\\n')\n newfile.close()\n os.chdir(path_to_raw + obj)\n \n os.chdir(path_to_interim + obj)\n #For this object and filter combination grab all the new versions made\n arr = glob('*test_'+filter_i+'.fits')\n print(len(arr))\n if len(arr) >= 1: #avoid empty cases where files have been removed earlier\n #or don't exist at all since the dictionary also contains\n #pairs of objects and filters that didn't meet the swarp\n #requirements (didn't pass preliminary exptime or filter\n #checks so those folders/images don't exist)\n \n #If new versions exist then write their names to a text file \n with open(filter_i+'_img_list_testfil.txt','wb') as newfile2:\n for obj in arr:\n newfile2.write(obj)\n newfile2.write('\\n')\n newfile2.close()\n #If text file exists return the name\n return filter_i+'_img_list_testfil.txt'\n #If text file doesn't exist return this string\n return 'error'", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n 
image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def SetMetadata(IMAGE,METADATA):\n IMAGE.SetSpacing(METADATA[0])\n IMAGE.SetOrigin(METADATA[1])\n IMAGE.SetDirection(METADATA[2])", "def set_field(self,Hext):\n self.raw_parameters[\"Hext\"] = Hext\n self.parameters = NormalizedParameters(self.raw_parameters)\n self._load()", "def __init__(self, encut, name=\"scf_settings\"):\n InputParameters.__init__(self, name=name)\n self.update_electronic_settings(\"ENCUT\", encut)", "def read_kitti_label(file, p2, use_3d_for_2d=False):\n\n gts = []\n\n text_file = open(file, 'r')\n\n '''\n Values Name Description\n ----------------------------------------------------------------------------\n 1 type Describes the type of object: 'Car', 'Van', 'Truck',\n 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',\n 'Misc' or 'DontCare'\n 1 truncated Float from 0 (non-truncated) to 1 (truncated), where\n truncated refers to the object leaving image boundaries\n 1 occluded Integer (0,1,2,3) indicating occlusion state:\n 0 = fully visible, 1 = partly occluded\n 2 = largely occluded, 3 = unknown\n 1 alpha Observation angle of object, ranging [-pi..pi]\n 4 bbox 2D bounding box of object in the image (0-based index):\n contains left, top, right, bottom pixel coordinates\n 3 dimensions 3D object dimensions: height, width, length (in meters)\n 3 location 3D object location x,y,z in camera coordinates (in meters)\n 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]\n 1 score Only for results: Float, indicating confidence in\n detection, needed for p/r curves, higher is better.\n '''\n\n pattern = re.compile(('([a-zA-Z\\-\\?\\_]+)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+'\n + '(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s*((fpat)?)\\n')\n .replace('fpat', '[-+]?\\d*\\.\\d+|[-+]?\\d+'))\n\n for line in text_file:\n\n parsed = pattern.fullmatch(line)\n\n # bbGt annotation in text format of:\n # cls x y w h occ x y w h ign ang\n if parsed is not None:\n\n obj = edict()\n\n ign = False\n\n cls = parsed.group(1) # type\n trunc = float(parsed.group(2))\n occ = float(parsed.group(3))\n alpha = float(parsed.group(4))\n\n x = float(parsed.group(5)) # left\n y = float(parsed.group(6)) # top\n x2 = float(parsed.group(7)) # right\n y2 = float(parsed.group(8)) # bottom\n\n width = x2 - x + 1\n height = y2 - y + 1\n\n h3d = float(parsed.group(9))\n w3d = float(parsed.group(10))\n l3d = float(parsed.group(11))\n\n cx3d = float(parsed.group(12)) # center of car in 3d\n cy3d = float(parsed.group(13)) # bottom of car in 3d\n cz3d = float(parsed.group(14)) # center of car in 3d\n rotY = float(parsed.group(15))\n\n # actually center the box\n cy3d -= (h3d / 2)\n\n elevation = (1.65 - cy3d) # height above sea level\n\n if use_3d_for_2d and h3d > 0 and w3d > 0 and l3d > 0:\n\n # re-compute the 2D box using 3D (finally, avoids clipped boxes)\n verts3d, corners_3d = project_3d(p2, cx3d, cy3d, cz3d, w3d, h3d, l3d, rotY, return_3d=True)\n\n # any boxes behind camera plane?\n if np.any(corners_3d[2, :] <= 0):\n ign = True\n\n else: # 3d for 2d\n x = min(verts3d[:, 0])\n y = min(verts3d[:, 1])\n x2 = max(verts3d[:, 0])\n y2 = max(verts3d[:, 1])\n\n width = x2 - x + 1\n height = y2 - y + 1\n\n else:\n verts3d, corners_3d = np.zeros((8, 2)), np.zeros((3, 8))\n\n # project cx, cy, cz\n coord3d = p2.dot(np.array([cx3d, cy3d, cz3d, 
1]))\n\n # store the projected instead\n cx3d_2d = coord3d[0]\n cy3d_2d = coord3d[1]\n cz3d_2d = coord3d[2] # TODO: depth?\n\n # 3d center to 2d, image coordinate\n cx = cx3d_2d / cz3d_2d\n cy = cy3d_2d / cz3d_2d\n\n # encode occlusion with range estimation\n # 0 = fully visible, 1 = partly occluded\n # 2 = largely occluded, 3 = unknown\n if occ == 0:\n vis = 1\n elif occ == 1:\n vis = 0.66\n elif occ == 2:\n vis = 0.33\n else:\n vis = 0.0\n\n while rotY > math.pi: rotY -= math.pi * 2\n while rotY < (-math.pi): rotY += math.pi * 2\n\n # recompute alpha\n alpha = convertRot2Alpha(rotY, cz3d, cx3d) # TODO: why don't use alpha in Kitti directly?\n\n obj.elevation = elevation\n obj.cls = cls\n obj.occ = occ > 0\n obj.ign = ign\n obj.visibility = vis\n obj.trunc = trunc\n obj.alpha = alpha\n obj.rotY = rotY\n\n # is there an extra field? (assume to be track)\n if len(parsed.groups()) >= 16 and parsed.group(16).isdigit(): obj.track = int(parsed.group(16))\n\n obj.bbox_full = np.array([x, y, width, height])\n obj.bbox_3d = [cx, cy, cz3d_2d, w3d, h3d, l3d, alpha, cx3d, cy3d, cz3d, rotY]\n # 2d center, depth, 3d shape, alpha, 3d center, rY\n obj.center_3d = [cx3d, cy3d, cz3d]\n # print(verts3d[:8], corners_3d)\n # 8 * 2 x, y\n # [[716.2700834 144.0556177]\n # [820.29305993 144.00207322]\n # [820.29305993 307.58688203]\n # [808.68674867 300.53454034]\n # [808.68674867 146.02789809]\n # [710.44462716 146.07566844]\n # [710.44462716 300.36824124]\n # [716.2700834 307.40048192]]\n\n # 3 * 8, x, y (height), z (depth)\n # [[1.23763004 2.43757004 2.43757004 2.44236996 2.44236996 1.24242996 1.24242996 1.23763004]\n # [-0.42 -0.42 1.47 1.47 -0.42 -0.42 1.47 1.47]\n # [8.1760119 8.1640121 8.1640121 8.6439881 8.6439881 8.6559879 8.6559879 8.1760119]]\n obj.vertices = verts3d[:8].T.flatten()\n obj.corners_3d = corners_3d.flatten()\n\n gts.append(obj)\n\n text_file.close()\n\n return gts", "def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F444W', grism='DFSR'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRCam'\n h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'DFSR':\n h['GRISM'] = 'DFSR', 'Spectral trace along X'\n else:\n h['GRISM'] = 'DFSC', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def convert2EbnerParamOriginalParam(listSlice,list_prefix,directory,paramAx,paramCor,paramSag):\n paramAx=np.load(paramAx)\n paramCor=np.load(paramCor)\n paramSag=np.load(paramSag)\n param=[]\n param.append(paramAx)\n param.append(paramCor)\n param.append(paramSag)\n \n images,mask = createVolumesFromAlist(listSlice.copy()) #list of images corresponding to differents original stacks\n \n \n mat = 
np.array([[-1,0,0,0],[0,-1,0,0],[0,0,1,0],[0,0,0,1]]) #matrix to convert affine matrix from nibabel to itk\n\n for n in range(len(images)): #for each stack\n \n imagen = images[n]\n \n for i_slice in range(len(images[n])): #for each slices (in each stacks)\n \n slicei=imagen[i_slice]\n dimension=3\n X,Y,Z= slicei.get_slice().get_fdata().shape\n transfo = param[n][slicei.get_index_slice(),:,:]\n #print()\n matrix = mat @ transfo @ mat\n #print(matrix)\n test = sitk.AffineTransform(dimension)\n test.SetMatrix(matrix[0:3,0:3].flatten())\n test.SetTranslation(matrix[0:3,3])\n images_index = slicei.get_index_image()\n\n sitk.WriteTransform(test,\"%s/%s_slice%d.tfm\" %(directory,list_prefix[images_index],slicei.get_index_slice())) #save rigid transformation, computed at the barycenter of the image, adatpted to itk", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, buck=None, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n if buck is None:\n self.buck_pms = []\n else:\n self.buck_pms = [] # TODO:\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh,\n self.eps_ult, *self.buck_pms, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def _text_write_preprocess(self):\n self.check()\n\n max_name_len = np.max([len(name) for name in self.name])\n fieldtypes = [\"U\" + str(max_name_len), \"f8\", \"f8\"]\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n\n component_fieldnames = []\n for comp_name in comp_names:\n # This will add e.g. 
ra_J2000 and dec_J2000 for FK5\n component_fieldnames.append(comp_name + \"_\" + frame_desc_str)\n fieldnames = [\"source_id\"] + component_fieldnames\n stokes_names = [\"I\", \"Q\", \"U\", \"V\"]\n fieldshapes = [()] * 3\n\n if self.stokes_error is not None:\n stokes_error_names = [(f\"{k}_error\") for k in [\"I\", \"Q\", \"U\", \"V\"]]\n\n n_stokes = 0\n stokes_keep = []\n for si, total in enumerate(np.nansum(self.stokes.to(\"Jy\"), axis=(1, 2))):\n if total > 0:\n fieldnames.append(stokes_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n if self.stokes_error is not None:\n fieldnames.append(stokes_error_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n n_stokes += 1\n stokes_keep.append(total > 0)\n\n assert n_stokes >= 1, \"No components with nonzero flux.\"\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n fieldnames.append(\"subband_frequency\")\n else:\n fieldnames.append(\"frequency\")\n fieldtypes.append(\"f8\")\n fieldshapes.extend([(self.Nfreqs,)])\n elif self.reference_frequency is not None:\n fieldnames.extend([(\"reference_frequency\")])\n fieldtypes.extend([\"f8\"])\n fieldshapes.extend([()] * n_stokes + [()])\n if self.spectral_index is not None:\n fieldnames.append(\"spectral_index\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_rise_lst\"):\n fieldnames.append(\"rise_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_set_lst\"):\n fieldnames.append(\"set_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n dt = np.dtype(list(zip(fieldnames, fieldtypes, fieldshapes)))\n\n arr = np.empty(self.Ncomponents, dtype=dt)\n arr[\"source_id\"] = self.name\n\n for comp_ind, comp in enumerate(comp_names):\n arr[component_fieldnames[comp_ind]] = getattr(self.skycoord, comp).deg\n\n for ii in range(4):\n if stokes_keep[ii]:\n arr[stokes_names[ii]] = self.stokes[ii].T.to(\"Jy\").value\n if self.stokes_error is not None:\n arr[stokes_error_names[ii]] = self.stokes_error[ii].T.to(\"Jy\").value\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n arr[\"subband_frequency\"] = self.freq_array.to(\"Hz\").value\n else:\n arr[\"frequency\"] = self.freq_array.to(\"Hz\").value\n elif self.reference_frequency is not None:\n arr[\"reference_frequency\"] = self.reference_frequency.to(\"Hz\").value\n if self.spectral_index is not None:\n arr[\"spectral_index\"] = self.spectral_index\n\n if hasattr(self, \"_rise_lst\"):\n arr[\"rise_lst\"] = self._rise_lst\n if hasattr(self, \"_set_lst\"):\n arr[\"set_lst\"] = self._set_lst\n\n return arr", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n 
h['PHOTPLAM'] = 1.\n \n return h, wcs", "def __init__(self, osi, fy, fu, es, esh, eps_sh, eps_ult, cf, alpha_2, cd):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.es = float(es)\n self.esh = float(esh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.cf = float(cf)\n self.alpha_2 = alpha_2\n self.cd = float(cd)\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.es, self.esh, self.eps_sh, self.eps_ult, '-CMFatigue', self.cf, self.alpha_2, self.cd]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, lsr, alpha=1.0, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.lsr = float(lsr)\n self.alpha = float(alpha)\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh, self.eps_ult, '-DMBuck', self.lsr, self.alpha, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def __init__(self, encut, spinaxis, ldaul, Uparam, Jparam, name='DFTCL_settings'):\n ncl_settings = {\"ISPIN\": 2, \"MAGMOM\": None, \"SAXIS\": spinaxis, \"LSORBIT\": \".TRUE.\", \"LNONCOLLINEAR\": \".TRUE.\"}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=ncl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def load_charmm_ff_params(fname):\n with open(fname) as f:\n lines = f.readlines()\n\n comment_stripper = re.compile(r'[!\\*].*')\n ffp = ForceFieldParams(fname)\n\n current_section = None\n for i in range(len(lines)):\n # Ignore comments and blank lines\n line = comment_stripper.sub('', lines[i].strip())\n if line == '': continue\n\n tokens = line.split()\n skip_line = False\n for section in ('ATOM', 'BOND', 'ANGL', 'DIHE', 'IMPR', 'NONB', 'CMAP'):\n if tokens[0].startswith(section):\n current_section = section\n skip_line = True\n break\n\n if skip_line: continue\n\n if current_section is 'BOND':\n key1, key2 = key_names((tokens[0], tokens[1]))\n ffp.bonds[key1] = ffp.bonds[key2] = {\n 'force_constant': float(tokens[2]),\n 'equilibrium_distance': float(tokens[3])\n }\n elif current_section is 'ANGL':\n # TODO: Urey-Bradley terms\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2]))\n ffp.angles[key1] = ffp.angles[key2] = {\n 'force_constant': float(tokens[3]),\n 'equilibrium_angle': float(tokens[4]) * pi / 180.0\n }\n elif current_section is 'DIHE':\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n ffp.dihedrals[key1] = ffp.dihedrals[key2] = {\n 'force_constant': 
float(tokens[4]),\n 'multiplicity': float(tokens[5]),\n 'delta': float(tokens[6])\n }\n elif current_section is 'IMPR':\n key = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n else:\n # Unknown line type\n continue\n return ffp", "def get_aperture_coeffs_in_header(head):\n\n coeffs = {}\n for key, value in head.items():\n exp = '^GAMSE TRACE CHANNEL [A-Z] APERTURE \\d+ COEFF \\d+$'\n if re.match(exp, key) is not None:\n g = key.split()\n channel = g[3]\n aperture = int(g[5])\n icoeff = int(g[7])\n if (channel, aperture) not in coeffs:\n coeffs[(channel, aperture)] = []\n if len(coeffs[(channel, aperture)]) == icoeff:\n coeffs[(channel, aperture)].append(value)\n return coeffs", "def set_header( name, value ):", "def read_param_phil(self):\n\n # LABELIT target file settings\n if self.target_phil is None:\n self.write_default_phil()\n self.phil.ctr.SetValue(self.target_phil)\n\n # Resolution limits\n # \"Try/except\" for backwards compatibility\n try:\n lowres = self.params.cctbx_ha14.resolution_limits.low\n hires = self.params.cctbx_ha14.resolution_limits.high\n self.res_limits.lowres.SetValue(str(lowres))\n self.res_limits.hires.SetValue(str(hires))\n except AttributeError:\n pass\n\n # Target options\n # \"Try/except\" for backwards compatibility\n try:\n t_uc = self.params.cctbx_ha14.target_unit_cell\n t_lat = self.params.cctbx_ha14.target_lattice_type\n l_idx = self.target_lattice.ctr.FindString(str(t_lat))\n t_ctype = self.params.cctbx_ha14.target_centering_type\n if t_ctype == 'P':\n c_idx = 1\n elif t_ctype == 'C':\n c_idx = 2\n elif t_ctype == 'I':\n c_idx = 3\n elif t_ctype == 'R':\n c_idx = 4\n elif t_ctype == 'F':\n c_idx = 5\n else:\n c_idx = 0\n if t_uc is not None:\n uc_str = [str(i) for i in t_uc.parameters()]\n self.target_uc.cell.SetValue(' '.join(uc_str))\n self.target_lattice.ctr.SetSelection(l_idx)\n self.target_centering.ctr.SetSelection(c_idx)\n except AttributeError:\n pass\n\n # Grid search options\n idx = self.gs_type.ctr.FindString(self.params.cctbx_ha14.grid_search.type)\n self.set_grid_search(idx=idx)\n self.signal_search.SetValue(self.params.cctbx_ha14.grid_search.sig_height_search)\n\n # # Selection options\n # self.select_only.SetValue(self.params.cctbx_ha14.selection.select_only.flag_on)\n # self.img_objects_path.Enable(self.select_only.GetValue())\n\n idx = self.select_by.ctr.FindString(self.params.cctbx_ha14.selection.select_by)\n self.select_by.ctr.SetSelection(idx)\n\n self.min_sigma.sigma.SetValue(str(self.params.cctbx_ha14.selection.min_sigma))\n\n # Selection filters\n if self.params.cctbx_ha14.selection.prefilter.flag_on:\n pg = self.params.cctbx_ha14.selection.prefilter.target_pointgroup\n ut = self.params.cctbx_ha14.selection.prefilter.target_uc_tolerance\n rs = self.params.cctbx_ha14.selection.prefilter.min_resolution\n rf = self.params.cctbx_ha14.selection.prefilter.min_reflections\n if self.params.cctbx_ha14.selection.prefilter.target_unit_cell is not None:\n try:\n uc = self.params.cctbx_ha14.selection.prefilter.target_unit_cell.parameters()\n except AttributeError:\n uc = None\n else:\n uc = None\n\n if str(pg).lower() != 'none':\n self.filt_lattice.toggle_boxes()\n self.filt_lattice.lattice.SetValue(str(pg))\n if str(uc).lower() != 'none':\n self.filt_uc.toggle_boxes()\n self.filt_uc.a.SetValue(str(uc[0]))\n self.filt_uc.b.SetValue(str(uc[1]))\n self.filt_uc.c.SetValue(str(uc[2]))\n self.filt_uc.alpha.SetValue(str(uc[3]))\n self.filt_uc.beta.SetValue(str(uc[4]))\n self.filt_uc.gamma.SetValue(str(uc[5]))\n 
self.filt_uc.tolerance.SetValue(str(ut))\n if str(rs).lower() != 'none':\n self.filt_res.toggle_boxes()\n self.filt_res.res.SetValue(str(rs))\n if str(rf).lower() != 'none':\n self.filt_ref.toggle_boxes()\n self.filt_ref.ref.SetValue(str(rf))", "def project_p2c_image(src, H): #---- project p to c (whole image)\r\n Z = H[2]; phi= H[3]; S= H[4]; TV= H[5]; TU= H[6];\r\n rows= src.shape[0]; cols= src.shape[1]; # get image size info\r\n diag= np.sqrt(rows**2+cols**2); # diagnol length\r\n radi= int(diag*S/2*1.1); # radius of new plot should be larger\r\n dest= np.zeros((radi*2,radi*2,3)) # projection result\r\n cosf= np.cos(phi); sinf= np.sin(phi); # rotation parameters\r\n u0 = radi-(TU-np.floor(TU)); # only process fractional part\r\n v0 = radi-(TV-np.floor(TV)); # of TU and TV\r\n kv = np.arange(0,radi*2); # \r\n #--- ---\r\n srcx= src.copy();\r\n srcx[0,:,:]=0; srcx[rows-2:rows,:,:]=0; \r\n srcx[:,0,:]=0; srcx[:,cols-2:cols,:]=0;\r\n #--- mapping ---\r\n for ku in range(0,radi*2): # scan each column\r\n UP = (ku-u0)/S; VP= (kv-v0)/S; # correct tu,tv,s\r\n RP =-sinf*UP + cosf*VP;\r\n CP = cosf*UP + sinf*VP; # correct rotation phi\r\n theta= CP/Z; # horizontal angle\r\n C = Z*np.tan(theta) + cols/2;\r\n R = RP/np.cos(theta) + rows/2;\r\n #--- interpolation ---\r\n C = np.minimum(np.maximum(C, 0), cols-2);\r\n R = np.minimum(np.maximum(R, 0), rows-2); \r\n C0 = np.floor(C).astype(int); C1= C-C0; \r\n R0 = np.floor(R).astype(int); R1= R-R0; \r\n for m in range(0,3):\r\n pixel = srcx[R0 ,C0 ,m]*(1-R1)*(1-C1);\r\n pixel+= srcx[R0 ,C0+1,m]*(1-R1)*( C1);\r\n pixel+= srcx[R0+1,C0 ,m]*( R1)*(1-C1);\r\n pixel+= srcx[R0+1,C0+1,m]*( R1)*( C1);\r\n dest[kv,ku,m]= pixel; \r\n return dest", "def write_header(outfbfile, header_params, header):\n for hp in header_params:\n hdrval = sigproc.addto_hdr(hp, header[hp])\n outfbfile.write(hdrval)", "def PImageT2Spec (inImage, outImage, nTerm, \n inCCVer, outCCVer, err,\n refFreq=1.0e9, terms=None, startCC=1, endCC=0, \n dropNeg=True, dist=None):\n ################################################################\n # Checks\n if not Image.PIsA(inImage):\n raise TypeError,\"inImage MUST be a Python Obit Image\"\n if not Image.PIsA(outImage):\n raise TypeError,\"outImage MUST be a Python Obit Image\"\n if not OErr.OErrIsA(err):\n raise TypeError,\"err MUST be an OErr\"\n\n # Limit on distance\n if dist==None:\n limit = 1.0e20\n else:\n limit = dist\n inImage.List.set(\"Limit\", limit) # Save on info list\n inImage.List.set(\"dropNeg\", dropNeg) \n\n # Update output header\n d = outImage.Desc.Dict\n d['ctype'][2] = 'SPECLOGF'\n d['crval'][2] = refFreq\n outImage.Desc.Dict = d;\n outImage.UpdateDesc(err)\n\n # Merge CCs to temp cc table\n tmpCCver = Image.PGetHighVer(inImage, \"AIPS CC\") + 1;\n inTab = inImage.NewTable(Image.READONLY, \"AIPS CC\", inCCVer, err)\n noParms = inTab.Desc.List.Dict[\"NO_PARMS\"][2][0]\n tmpTab = inImage.NewTable(Image.WRITEONLY, \"AIPS CC\", tmpCCver, err, noParms=noParms)\n TableUtil.PCCMerge(inTab, tmpTab, err)\n # Fix spectrum if needed\n if terms:\n nterm = len(terms)\n Obit.TableCCUtilFixTSpec(inImage.me, tmpCCver, \\\n refFreq, nterm, terms,\n startCC, endCC, err.me)\n if err.isErr:\n OErr.printErrMsg(err, \"Error Adjusting spectrum of CC Table\")\n # Convert\n outImage.me = Obit.ImageUtilT2Spec(inImage.me, outImage.me, nTerm, tmpCCver, \\\n outCCVer, startCC, endCC, err.me)\n if err.isErr:\n OErr.printErrMsg(err, \"Error Converting image/CC Table\")\n # Delete temporary CC table\n inImage.ZapTable(\"AIPS CC\", tmpCCver, err)\n # Do 
history for spectrum modification\n pgmName = OSystem.PGetPgmName()\n outHistory = History.History(\"history\", outImage.List, err)\n History.POpen(outHistory, History.READWRITE, err)\n History.PTimeStamp(outHistory,\" Start Obit \"+pgmName,err)\n if terms:\n History.PWriteRec(outHistory,-1,pgmName+\" nterm = \"+str(nterm),err)\n History.PWriteRec(outHistory,-1,pgmName+\" refFreq = \"+str(refFreq ),err)\n History.PWriteRec(outHistory,-1,pgmName+\" terms = \"+str(terms),err)\n History.PClose(outHistory, err)", "def __init__(self,inp='INP.mcnp'):\n # Material dictionary for the moderator, light guide, and detector\n self.material = {'Moderator':None,'Detector':None,'LightGuide':None}\n self.material['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector\n self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA\n self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HPDE\n \n # Cell and Surface Inital Numbering\n self.CellStartNum = 600\n self.SurfaceStartNum = 600\n self.ZeroSurfaceNum = 500\n self.UniverseNum = 200\n self.surfGeo = None\n self.inp = inp\n self.name = 'OUT_'+self.inp.strip('.mcnp')+'.'\n self.setMaterial(0.1,'PS')", "def init(self):\n # pC es un puntero a la imagen en C\n nP = len(self.puntos)\n self.pC = c_colision.malloc_tren(nP)\n # Estas magnitudes se inicializan en la imagen y\n # ya no se tocan desde python\n for i in range(nP):\n self.pC.contents.P[i]= pointer(PyC_Punto(self.puntos[i]))\n self.colision = self.pC.contents.colision = 0", "def generateHeader(param_dict, filename_out, test_mode=False, template=\"uvfits_headers/header.tpl\"):\n findAndReplace(param_dict, template,filename_out, test_mode)", "def process_set_metadata(self, data, set_name):\n hdf5_handler = self.hdf5_manager.get_group(set_name)\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n if 'test' in set_name:\n is_test = True\n data_ = data[0]\n filename_ids = data[1]\n annotations = data[2]\n category = data[3]\n supercategory = data[4]\n category_id = data[5]\n else:\n is_test = False\n data_ = data[0]\n annotations = data[1]\n annotation_id_dict = data[2]\n category = data[3]\n supercategory = data[4]\n category_id = data[5]\n filename_ids = data[6]\n images_fname_by_id = data[7]\n skeleton = data[8]\n keypoints = data[9]\n\n keypoints_ = str2ascii(keypoints)\n skeleton_ = np.array(pad_list(skeleton, -1), dtype=np.uint8)\n\n category_ = str2ascii(category)\n supercategory_ = str2ascii(supercategory)\n\n image_filenames = []\n coco_urls = []\n width = []\n height = []\n image_id = []\n\n annotation_id = []\n area = []\n iscrowd = [0, 1]\n segmentation = []\n num_keypoints = list(range(0, 17 + 1))\n keypoints_list = []\n bbox = []\n object_id = []\n\n # coco id lists\n # These are order by entry like in the annotation files.\n # I.e., coco_images_ids[0] has the object_id with the file_name, id, height, etc.\n # as coco_annotation_file[set_name][\"images\"][0]\n coco_images_ids = []\n coco_categories_ids = []\n coco_annotations_ids = []\n\n if is_test:\n object_fields = [\"image_filenames\", \"coco_urls\", \"width\", \"height\"]\n else:\n object_fields = [\"image_filenames\", \"coco_urls\", \"width\", \"height\",\n \"category\", \"supercategory\", \"boxes\", \"area\",\n \"iscrowd\", \"segmentation\",\n \"image_id\", \"category_id\", \"annotation_id\",\n \"num_keypoints\", \"keypoints\"]\n\n list_boxes_per_image = []\n list_keypoints_per_image = []\n list_object_ids_per_image = []\n 
list_image_filenames_per_num_keypoints = []\n list_object_ids_per_keypoint = [] # body part\n\n if self.verbose:\n print('> Adding data to default group:')\n prgbar = progressbar.ProgressBar(max_value=len(data_))\n\n counter = 0\n tmp_coco_annotations_ids = {}\n\n for i, key in enumerate(data_):\n annotation = data_[key]\n image_filenames.append(annotation[\"file_name\"])\n width.append(annotation[\"width\"])\n height.append(annotation[\"height\"])\n coco_urls.append(annotation[\"coco_url\"])\n image_id.append(annotation[\"id\"])\n\n if is_test:\n # *** object_id ***\n # [filename, coco_url, width, height]\n object_id.append([i, i, i, i])\n list_object_ids_per_image.append([i])\n else:\n boxes_per_image = []\n\n if \"object\" in annotation:\n for j, obj_idx in enumerate(annotation[\"object\"]):\n obj = annotation[\"object\"][obj_idx]\n area.append(obj[\"area\"])\n bbox.append(obj[\"bbox\"])\n annotation_id.append(obj[\"id\"])\n segmentation.append(obj[\"segmentation\"])\n keypoints_list.append(obj[\"keypoints\"])\n\n # *** object_id ***\n # [filename, coco_url, width, height,\n # category, supercategory,\n # bbox, area, iscrowd, segmentation,\n # \"image_id\", \"category_id\", \"annotation_id\"\n # \"num_keypoints\", \"keypoints\"]\n object_id.append([i, i, i, i,\n category.index(obj[\"category\"]), supercategory.index(\n obj[\"supercategory\"]),\n counter, counter, obj[\"iscrowd\"], counter,\n i, category.index(obj[\"category\"]), counter,\n obj[\"num_keypoints\"], counter])\n\n boxes_per_image.append(counter)\n\n # temporary var\n tmp_coco_annotations_ids[obj[\"id\"]] = counter\n\n # update counter\n counter += 1\n\n list_boxes_per_image.append(boxes_per_image)\n list_keypoints_per_image.append(boxes_per_image)\n list_object_ids_per_image.append(boxes_per_image)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n if self.verbose:\n print('> Processing coco lists:')\n prgbar = progressbar.ProgressBar(max_value=len(annotations['images']))\n\n # set coco id lists\n for i, annot in enumerate(annotations['images']):\n fname_id = image_filenames.index(os.path.join(image_dir, annot['file_name']))\n coco_images_ids.append(fname_id)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n coco_categories_ids = list(range(len(category)))\n\n if not is_test:\n if self.verbose:\n prgbar = progressbar.ProgressBar(max_value=len(annotations['annotations']))\n for i, annot in enumerate(annotations['annotations']):\n annot_id = tmp_coco_annotations_ids[annot['id']]\n coco_annotations_ids.append(annot_id)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n # process lists\n if not is_test:\n if self.verbose:\n print('> Processing lists...')\n\n for i in range(len(keypoints)):\n imgs_per_num = [val[0] for _, val in enumerate(object_id) if val[8] == i]\n imgs_per_num = list(set(imgs_per_num)) # get unique values\n imgs_per_num.sort()\n list_image_filenames_per_num_keypoints.append(imgs_per_num)\n\n for i in range(len(keypoints)):\n objs_per_keypoint = [j for j, val in enumerate(\n keypoints_list) if val[i * 3] > 0 or val[i * 3 + 1] > 0]\n objs_per_keypoint = list(set(objs_per_keypoint)) # get unique values\n objs_per_keypoint.sort()\n list_object_ids_per_keypoint.append(objs_per_keypoint)\n\n hdf5_write_data(hdf5_handler, 'image_filenames',\n str2ascii(image_filenames), 
dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_urls',\n str2ascii(coco_urls), dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'width',\n np.array(width, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'height',\n np.array(height, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'category',\n category_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'supercategory',\n supercategory_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'image_id',\n np.array(image_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'category_id',\n np.array(category_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'object_ids',\n np.array(object_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'object_fields',\n str2ascii(object_fields), dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_images_ids',\n np.array(coco_images_ids, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'coco_categories_ids',\n np.array(coco_categories_ids, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'list_object_ids_per_image',\n np.array(pad_list(list_object_ids_per_image, -1), dtype=np.int32),\n fillvalue=-1)\n\n if not is_test:\n hdf5_write_data(hdf5_handler, 'annotation_id',\n np.array(annotation_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'keypoint_names',\n keypoints_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'skeleton',\n skeleton_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'boxes',\n np.array(bbox, dtype=np.float),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'iscrowd',\n np.array(iscrowd, dtype=np.uint8),\n fillvalue=-1)\n\n nrows = len(segmentation)\n ncols = max([len(elem) for elem in segmentation])\n dset = hdf5_handler.create_dataset('segmentation',\n (nrows, ncols),\n dtype=np.float,\n chunks=True,\n compression=\"gzip\",\n compression_opts=4,\n fillvalue=-1)\n\n if self.verbose:\n print(' -- Saving segmentation masks to disk (this will take some time)')\n prgbar = progressbar.ProgressBar(max_value=nrows)\n for i in range(nrows):\n dset[i, :len(segmentation[i])] = np.array(segmentation[i], dtype=np.float)\n if self.verbose:\n prgbar.update(i)\n\n if self.verbose:\n prgbar.finish()\n\n hdf5_write_data(hdf5_handler, 'area',\n np.array(area, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'num_keypoints',\n np.array(num_keypoints, dtype=np.uint8),\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'keypoints',\n np.array(keypoints_list, dtype=np.int32),\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_annotations_ids',\n np.array(coco_annotations_ids, dtype=np.int32),\n fillvalue=-1)\n\n pad_value = -1\n hdf5_write_data(hdf5_handler, 'list_boxes_per_image',\n np.array(pad_list(list_boxes_per_image, pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_keypoints_per_image',\n np.array(pad_list(list_keypoints_per_image, pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_image_filenames_per_num_keypoints',\n np.array(pad_list(list_image_filenames_per_num_keypoints,\n pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_object_ids_per_keypoint',\n np.array(pad_list(list_object_ids_per_keypoint,\n pad_value), dtype=np.int32),\n fillvalue=pad_value)", "def coadd(template_header, output_dir, input_dir, output=None, 
add_type=None):\n img_dir = input_dir\n # output is either 'weights' or 'int'\n if output is None:\n reprojected_table = os.path.join(img_dir, 'reprojected.tbl')\n out_image = os.path.join(output_dir, 'mosaic.fits')\n else:\n reprojected_table = os.path.join(img_dir, output + '_reprojected.tbl')\n out_image = os.path.join(output_dir, output + '_mosaic.fits')\n montage.mAdd(reprojected_table, template_header, out_image, img_dir=img_dir, exact=True, type=add_type)", "def fix_headers(hParams,testMode=False):\n \n \n fileList = glob.glob(hParams['fileList'])\n for oneFile in fileList:\n with fits.open(oneFile,'update') as HDUList_orig:\n if testMode == True:\n print(\"Doing a dry run without modifying headers\")\n HDUList = fits.HDUList([fits.PrimaryHDU(None,header=HDUList_orig[0].header)])\n primHead = HDUList[0].header\n else:\n primHead = HDUList_orig[0].header\n\n colcorner = hParams['COLCORNR'][primHead['SCA_ID']]\n rowcorner = hParams['ROWCORNR'][primHead['SCA_ID']]\n \n detTiming = pynrc.pynrc_core.DetectorOps(detector=481,\n wind_mode=hParams['wind_mode'],\n xpix=hParams['xpix'],\n ypix=hParams['ypix'],\n x0=colcorner-1,\n y0=rowcorner-1,\n nint=hParams['nint'],\n ngroup=hParams['ngroup'],\n nf=hParams['nf'])\n correctHead = detTiming.make_header()\n\n obsId = primHead['OBS_ID']\n if obsId in hParams['expStart'].keys():\n expStart = hParams['expStart'][obsId]\n date, time = expStart.split('T')\n primHead['DATE-OBS'] = date\n primHead['TIME-OBS'] = time\n \n t_expStart = Time(expStart)\n t_expEnd = t_expStart + correctHead['EXPTIME'] * u.second\n expEnd = t_expEnd.fits\n date, time = expEnd.split('T')\n primHead['DATE-END'] = date\n primHead['TIME-END'] = time\n else:\n print(\"Couldn't find exp start for {}\".format(obsId))\n \n\n for oneKey in ['TFRAME','TGROUP','INTTIME','EXPTIME',\n 'TREFROW','BREFROW','LREFCOL','RREFCOL',\n 'COLCORNR','ROWCORNR']:\n primHead[oneKey] = correctHead[oneKey]\n \n if hParams['wind_mode'] == 'WINDOW':\n primHead['HWINMODE'] = 'ENABLE'\n else:\n primHead['HWINMODE'] = 'DISABLE'\n primHead['DETECTOR'] = detectorDict[primHead['SCA_ID']]\n \n primHead['TLDYNEID'] = hParams['teledyneID'][primHead['SCA_ID']]\n if testMode == True:\n pdb.set_trace()", "def doPhot(imphttab, obsmode, hdr):\n\n (photflam, photfnu, photbw, photplam, photzpt) = \\\n readImPhtTab(imphttab, obsmode)\n\n hdr[\"photflam\"] = photflam\n hdr[\"photfnu\"] = photfnu\n hdr[\"photbw\"] = photbw\n hdr[\"photplam\"] = photplam\n hdr[\"photzpt\"] = photzpt", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def mo_parse_p(self, filepath):\n\n # Now, can reprocess using tesseract-ocr rather than pdftotext\n ptext = textract.process(filepath, method='tesseract', encoding='utf-8')\n ptext = ptext.replace(b'\\xe2\\x80\\x94', b'-')\n ptext = ptext.decode('utf-8')\n keys = list(self.mo_coefficient_name_map.keys())\n\n # Get the calibration date:\n for line in ptext.splitlines():\n if 'CALIBRATION DATE' in line:\n items = line.split()\n ind = items.index('DATE:')\n cal_date = items[ind+1]\n cal_date = pd.to_datetime(cal_date).strftime('%Y%m%d')\n self.date.update({len(self.date): cal_date})\n\n if 'psia S/N' in line:\n items = line.split()\n ind = items.index('psia')\n prange = items[ind-1]\n name = self.mo_coefficient_name_map.get('prange')\n 
self.coefficients.update({name: prange})\n\n # Loop through each line looking for the lines which contain\n # calibration coefficients\n if '=' in line:\n # Tesseract-ocr misreads '0' as O, and 1 as IL\n line = line.replace('O', '0').replace('IL', '1').replace(\n '=', '').replace(',.', '.').replace(',', '.')\n line = line.replace('L', '1').replace('@', '0').replace('l', '1').replace('--', '-')\n if '11' in line and 'PA2' not in line:\n line = line.replace('11', '1')\n items = line.split()\n for n, k in enumerate(items):\n if k.lower() in keys:\n try:\n float(items[n+1])\n name = self.mo_coefficient_name_map.get(k.lower())\n self.coefficients.update({name: items[n+1]})\n except:\n pass\n if 'CC_ptcb2' not in list(self.mo_coefficient_name_map.keys()):\n self.coefficients.update({'CC_ptcb2': '0.000000e+000'})", "def _make_image_info_hst(self, flistname):\n\n flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n #fname=line.strip()\n flist.append(fname)\n magzp_list.append(magzp)\n\n magzp = np.array(magzp_list)\n\n nimage = len(flist)\n\n path_len = max([len(f) for f in flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f.replace('sci.fits','wht.fits')\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def set_energy_params(econt_new, ef, para_check):\n evscal = get_Ry2eV()\n\n for key, val in econt_new.items():\n if key in ['kmesh', 'BZDIVIDE', 'KMESH', 'bzdivide']:\n key = 'BZDIVIDE'\n elif key in ['nepts', 'NPT2']:\n key = 'NPT2'\n # also add IEMXD which has to be big enough\n para_check.set_value('IEMXD', val, silent=True)\n elif key in ['emin', 'EMIN']:\n key = 'EMIN'\n val = (ef + val / evscal) # converting the Energy value to Ry while the fermi_energy in Ry\n elif key in ['emax', 'EMAX']:\n key = 'EMAX'\n val = (ef + val / evscal) # Converting to the Ry (unit of the energy)\n elif key in ['tempr', 'TEMPR']:\n key = 'TEMPR'\n elif key in ['RCLUSTZ', 'rclustz']:\n key = 'RCLUSTZ'\n para_check.set_value(key, val, silent=True)\n\n # set the rest of the DOS contour\n para_check.set_multiple_values(\n NPT1=0,\n NPT3=0,\n NPOL=0,\n use_semi_circle_contour=False, # this is needed to get a DOS contour\n )\n\n # set KPOIBZ to match BZDIVIDE setting\n # this is only done if 'KPOIBZ' is not given already in the input\n bzdiv = para_check.get_value('BZDIVIDE')\n if bzdiv is not None and 'KPOIBZ' not in econt_new:\n para_check.set_value('KPOIBZ', np.prod(bzdiv), silent=True)\n # we need to make sure to deactivate the semi-circle contour, otherwise DOS contour is not used\n para_check.set_value('<USE_SEMI_CIRCLE_CONTOUR>', False, silent=True)\n\n return para_check", "def get_primary_header(input_lst):\n lst = [\n # 12345678 12345678901234567890123456789012345678901234567\n ('SIMPLE' , 'file does conform to FITS standard' ),\n ('BITPIX' , 'number of bits per data pixel' ),\n ('NAXIS' , 'number of data axes' ),\n ('NAXIS1' , 'length of data axis 1' ),\n ('NAXIS2' , 'length of data axis 2' ),\n ('BSCALE' , 'factor to linearly scale the data pixel 
values' ),\n ('BZERO' , 'offset to linearly scale the data pixel values' ),\n ('BUNIT' , 'physical unit of the data pixel values' ),\n ('BLANK' , 'value representing undefined physical values' ),\n ('DISPAXIS', 'main dispersion axis of the spectral data' ),\n ('DATATYPE', 'type of data (calibration/science)' ),\n ('OBJECT' , 'object observed' ),\n ('DATE-OBS', 'start date of observation run' ),\n ('MJD-OBS' , 'Modified Julian Date of observation run' ),\n ('TIMESYS' , 'time system' ),\n ('FRAMEID' , 'frame ID in observation run' ),\n ('RA' , 'right ascension of object' ),\n ('DEC' , 'declination of object' ),\n ('RADESYS' , 'name of reference frame' ),\n ('EQUINOX' , 'epoch of the mean equator and equinox in years' ),\n ('EXPTIME' , 'exposure time in seconds' ),\n ('PHO-OFF' , 'offset of photon middle time' ),\n ('UTC-STA' , 'UTC at start of exposure' ),\n ('UTC-MID' , 'UTC at middle of exposure' ),\n ('UTC-PHO' , 'UTC at photon middle of exposure' ),\n ('UTC-END' , 'UTC at end of exposure' ),\n ('LT-STA' , 'local time at start of exposure' ),\n ('LT-MID' , 'local time at middle of exposure' ),\n ('LT-PHO' , 'local time at photon middle of exposure' ),\n ('LT-END' , 'local time at end of exposure' ),\n ('LST-STA' , 'local sidereal time at start' ),\n ('LST-MID' , 'local sidereal time at middle' ),\n ('LST-PHO' , 'local sidereal time at photon middle' ),\n ('LST-END' , 'local sidereal time at end' ),\n ('MJD-STA' , 'Modified Julian Date of UTC-STA' ),\n ('MJD-MID' , 'Modified Julian Date of UTC-MID' ),\n ('MJD-PHO' , 'Modified Julian Date of UTC-PHO' ),\n ('MJD-END' , 'Modified Julian Date of UTC-END' ),\n ('AIRM-STA', 'airmass at start of exposure' ),\n ('AIRM-MID', 'airmass at middle of exposure' ),\n ('AIRM-PHO', 'airmass at photon middle of exposure' ),\n ('AIRM-END', 'airmass at end of exposure' ),\n ('AIRMASS' , 'effective airmass during exposure' ),\n ('ALT-STA' , 'telescope altitude at start' ),\n ('ALT-MID' , 'telescope altitude at middle' ),\n ('ALT-PHO' , 'telescope altitude at photon middle' ),\n ('ALT-END' , 'telescope altitude at end' ),\n ('AZ-STA' , 'telescope azimuth at start' ),\n ('AZ-MID' , 'telescope azimuth at middle' ),\n ('AZ-PHO' , 'telescope azimuth at photon middle' ),\n ('AZ-END' , 'telescope azimuth at end' ),\n ('MOON-AGE', 'days past new moon at middle of exposure' ),\n ('MOON-ALT', 'moon altitude at middle of exposure' ),\n ('MOON-AZ' , 'moon azimuth at middle of exposure' ),\n ('MOON-DIS', 'angular distance to moon (in degree)' ),\n ('TWI-END' , 'end time of astronomical twilight in UTC' ),\n ('TWI-STA' , 'start time of astronomical twilight in UTC' ),\n ('PROP-ID' , 'proposal ID' ),\n ('PROP-TIT', 'title of proposal' ),\n ('PROP-PI' , 'principal investigator of proposal' ),\n ('OBSERVER', 'people who acquire the data' ),\n ('OBSERVAT', 'observatory where the data is acquired' ),\n ('TELESCOP', 'telescope used to acquire the data' ),\n ('OBS-LONG', 'longitude of the telescope' ), \n ('OBS-LAT' , 'latitude of the telescope' ),\n ('OBS-ALT' , 'altitude of the telescope in meter' ),\n ('INSTRUME', 'instrument used to acquire the data' ),\n ('SETUP-ID', 'ID of the instrument setup' ),\n ('SLT-WID' , 'slit width (in mm)' ),\n ('SLT-LEN' , 'slit length (in mm)' ),\n ('NCHANNEL', 'number of simultaneous channels' ),\n ('CHANNEL1', 'object of channel 1' ),\n ('CHANNEL2', 'object of channel 2' ),\n ('FILTER1' , 'filter in channel 1' ),\n ('FILTER2' , 'filter in channel 2' ),\n ('EXPMETER', 'usage of exposure meter' ),\n ('SHAK_STA', 'status of fiber shaker (on/off)' 
),\n ('SHAK_FRE', 'frequency of fiber shaker (in Hz)' ),\n ('SHAK_AMP', 'amplitude of fiber shaker' ),\n ('DETECTOR', 'detector used to acquire the data' ),\n ('GAIN' , 'readout gain of detector (in electron/ADU)' ),\n ('RO-SPEED', 'read out speed of detector' ),\n ('RO-NOISE', 'read out noise of detector' ),\n ('BINAXIS1', 'binning factor of data axis 1' ),\n ('BINAXIS2', 'binning factor of data axis 2' ),\n ('TEMP-DET', 'temperature of detector (in degree)' ),\n ('TEMP-BOX', 'temperature inside instrument box (in degree)' ),\n ('TEMP-ROO', 'temperature inside instrument room (in degree)' ),\n ('PRES-BOX', 'pressure inside instrument box (in hPa)' ),\n ('DATE' , 'file creation date' ),\n ('ORI-NAME', 'original filename' ),\n ('ORIGIN' , 'organization responsible for the FITS file' ),\n ('HEADVER' , 'version of header' ),\n ]\n now = datetime.datetime.now()\n header_lst = []\n for key, comment in lst:\n if key in input_lst.keys():\n value = input_lst[key]\n else:\n value = None\n if type(value) == type('a'):\n value = \"'%-8s'\"%value\n value = value.ljust(20)\n elif type(value) == type(u'a'):\n value = value.encode('ascii','replace')\n value = \"'%-8s'\"%value\n value = value.ljust(20)\n elif type(value) == type(1):\n value = '%20d'%value\n elif type(value) == type(1.0):\n if key[0:4]=='MJD-':\n # for any keywords related to MJD, keep 6 decimal places.\n # for reference, 1 sec = 1.16e-5 days\n value = '%20.6f'%value\n else:\n value = str(value).rjust(20)\n value = value.replace('e','E')\n elif type(value) == type(now):\n # if value is a python datetime object\n value = \"'%04d-%02d-%02dT%02d:%02d:%02d.%03d'\"%(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second,\n int(round(value.microsecond*1e-3))\n )\n elif value == True:\n value = 'T'.rjust(20)\n elif value == False:\n value = 'F'.rjust(20)\n elif value == None:\n value = \"''\".ljust(20)\n else:\n print('Unknown value: {}'.format(value))\n string = '%-8s= %s / %s'%(key,value,comment)\n if len(string)>=80:\n string = string[0:80]\n else:\n string = string.ljust(80)\n\n header_lst.append(string)\n\n return header_lst", "def create_image_caption_pairs(self):", "def __init__(self, config, set_name, preprocess_image):\n\t\t\tself.data_dir = config['data_dir']\n\t\t\tself.set_name = set_name\n\t\t\tself.coco = COCO(os.path.join(self.data_dir, 'annotations', 'instances_' + set_name + '.json'))\n\t\t\tself.image_ids = self.coco.getImgIds()\n\t\t\tself.mask = config['mask']\n\n\t\t\tself.load_classes()\n\n\t\t\tsuper(CocoGenerator, self).__from_config__(config, preprocess_image=preprocess_image)", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def construct_param_dict(params,K_RC,K_CP,m_P):\n ###scaling constants\n w=params['w']\n pd=params['pd'] # in 3D and 0.21 in 2D\n pv=params['pv']\n Er=params['Er'] ;Ek=params['Ek']\n ER=params['ER'];EC=params['EC'];EP=params['EP'];\n Eq1=params['Eq1'];Eq2=params['Eq2']\n\n\n #capture success function\n a = params['a']\n b = params['b']\n c = params['c']\n formC = params['formC']\n formPC = params['formPC']\n formPR = params['formPR']\n \n ###variables\n TR= params['TR'] ;TC= params['TC'];TP=params['TP'];D_R= params['D_R']; D_C= params['D_C']\n K_RP=K_RC*K_CP\n fmC=params['fmC'];thermyR=params['thermyR']\n thermyC=params['thermyC'];thermyP=params['thermyP']\n fmPR=params['fmPR']\n 
fmPC=params['fmPC']\n m_C = K_CP*m_P;m_R = K_RP*m_P\n ###normalization constants and boltzmann constant\n r0 = params['r0']\n k0 = params['k0'] # will depend on the productivity of the habitat\n a01 = a02 = params['a012'] # will depedend on the dimension of the habitat \n a03 = params['a03']\n d0= params['d0']\n q10 = params['q10'];q20 = params['q20'];\n v0R = params['v0R'];v0C =params['v0C'];v0P =params['v0P'];k = b_k\n hC0 = params['hC0'];hP0 = params['hP0'] \n \n #intrapopulation parameters\n q1=set_q1(q10,m_C,w,Eq1,TR,k)\n q2=set_q2(q20,m_P,w,Eq2,TC,k)\n K=set_K(k0,m_R,w,Ek,TR,k)\n r=set_r(r0,m_R,w,Er,TR,k)\n\n #interpopulation parameters\n a1=set_alfa(m_C,a01,K_RC,pv,pd,TR,TC,ER,EC,D_R,v0R,v0C,g,alfa,fmC,thermyR,thermyC,k,a,b,c,formC)\n a2=set_alfa(m_P,a02,K_RP,pv,pd,TR,TP,ER,EP,D_R,v0R,v0P,g,alfa,fmPR,thermyR,thermyP,k,a,b,c,formPR)\n a3=set_alfa(m_P,a03,K_CP,pv,pd,TC,TP,EC,EP,D_C,v0C,v0P,g,alfa,fmPC,thermyC,thermyP,k,a,b,c,formPC)\n\n t_hp = set_th(hP0,m_P,w,EP,k,TP)\n t_hc = set_th(hC0,m_C,w,EC,k,TC)\n param_dict={'q1':q1,'q2':q2,'K':K,'r':r,'a1':a1,'a2':a2,'a3':a3,'t_hp':t_hp,'t_hc':t_hc}\n \n return param_dict", "def __init__(self, encut, ldaul, Uparam, Jparam, name=\"DFTU_settings\"):\n\n dftu_settings = {\"LDAU\": \".TRUE.\" , \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LADAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def set_params2D(ima, p, xform = \"xform.align2d\"):\n\tt = Transform({\"type\":\"2D\",\"alpha\":p[0],\"tx\":p[1],\"ty\":p[2],\"mirror\":p[3],\"scale\":p[4]})\n\tima.set_attr(xform, t)", "def preprocess(self):", "def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target 
distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h", "def cpf(self, cpf):\n self._cpf = cpf", "def _transform_image(self, tfm, k_tfm, img0):\n img_list = []\n\n for k in range(k_tfm):\n img_list.append(tfm(img0))\n\n img = img_list\n if len(img) == 1:\n img = img[0]\n\n return img", "def _setbgpic(self, item, image):\n self.cv.itemconfig(item, image=image)\n self.cv.tag_lower(item)", "def defineBLOCKSECTION(f,layernamelist):\r\n feilinname_lineheight=2.5\r\n #note_lineheigh=4\r\n layercount=0\r\n feilin_name_pos=[70.0+globalconfig.CUTLINE_X_OFFSET,185.0+globalconfig.CUTLINE_Y_OFFSET]\r\n #note_pos=[190.0+globalconfig.CUTLINE_X_OFFSET,80.0+globalconfig.CUTLINE_Y_OFFSET]\r\n f.write(\"0\\nSECTION\\n2\\nBLOCKS\\n\") #绘制块定义\r\n f.write(\"0\\nBLOCK\\n8\\n0\\n2\\nROUND_1\\n70\\n0\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n\")\r\n f.write(\"0\\nPOLYLINE\\n8\\n0\\n5\\n3F\\n66\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n70\\n1\\n\")\r\n f.write(\"40\\n0.04\\n41\\n0.04\")\r\n f.write(\"\\n0\\nVERTEX\\n5\\n406\\n8\\n0\\n10\\n-0.02\\n20\\n0.0\\n30\\n0.0\\n42\\n1.0\")\r\n f.write(\"\\n0\\nVERTEX\\n5\\n407\\n8\\n0\\n10\\n0.02\\n20\\n0.0\\n30\\n0.0\\n42\\n1.0\\n0\\nSEQEND\\n5\\n408\\n8\\n0\\n\")\r\n f.write(\"0\\nENDBLK\\n5\\n43\\n8\\n0\\n\") \r\n \r\n for layername in layernamelist:\r\n layercount=layercount+1\r\n f.write(\"0\\nBLOCK\\n8\\n0\\n2\\n*U\"+str(layercount)+\"\\n\") \r\n f.write(\"70\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n\") \r\n f.write(\"0\\nTEXT\\n5\\n46\\n8\\n\"+layername+\"\\n6\\nCONTINUOUS\\n10\\n\"+str(feilin_name_pos[0])+\"\\n20\\n\"+str(feilin_name_pos[1])+\"\\n30\\n0.0\\n\")\r\n f.write(\"40\\n\"+str(feilinname_lineheight)+\"\\n1\\n\"+globalconfig.NAME_OF_FEILIN+\"-\"+layername+\"\\n0\\nENDBLK\\n5\\n47\\n8\\n\"+layername+\"\\n\")\r\n \r\n# layercount=layercount+1\r\n# f.write(\"0\\nBLOCK\\n8\\n0\\n2\\n*U\"+str(layercount)+\"\\n\") \r\n# f.write(\"70\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n\") \r\n# f.write(\"0\\nTEXT\\n5\\n46\\n8\\n\"+layername+\"\\n6\\nCONTINUOUS\\n10\\n\"+str(note_pos[0])+\"\\n20\\n\"+str(note_pos[1])+\"\\n30\\n0.0\\n\")\r\n# f.write(\"40\\n\"+str(note_lineheigh)+\"\\n1\\n\")\r\n# f.write(\"\")\r\n# f.write(\"\\n0\\nENDBLK\\n5\\n47\\n8\\n0\\n\") \r\n \r\n f.write(\"0\\nENDSEC\\n\")", "def init_from_header(self, hdr, nofn=False):\n\n global filtfn, fmtch, ftypes\n\n self.hdr = hdr\n getwcs = False\n\n try:\n self.target = hdr['OBJECT']\n except KeyError:\n pass\n\n for d in ('DATE-OBS', 'DATE', '_ATE'):\n try:\n self.date = Time(hdr[d]).datetime\n break\n except KeyError:\n pass\n if self.date is None:\n raise RemFitsErr(\"No date found in hheader\")\n\n for t in ('CCDTEMP', 'TEMPCHIP'):\n try:\n self.ccdtemp = hdr[t]\n break\n except KeyError:\n pass\n if self.ccdtemp is None:\n raise RemFitsErr(\"No 
temperature found in hheader\")\n\n dfmtd = self.date.strftime(\"%d/%m/%Y @ %H:%M:%S\")\n\n try:\n self.filter = hdr['FILTER']\n except KeyError:\n pass\n\n if nofn:\n if self.filter is None:\n raise RemFitsErr(\"No filename and no filter given\")\n self.ftype = \"REMIR file\"\n getwcs = True\n self.description = \"REMIR file dated \" + dfmtd\n try:\n self.endx = self.ncolumns = hdr['NAXIS1']\n self.endy = self.nrows = hdr['NAXIS2']\n except KeyError:\n raise RemFitsErr(\"Dimensions of data not given in FITS header\")\n elif self.filter in remir_types:\n self.description = \"REMIR Image file dated \" + dfmtd\n self.ftype = 'Image'\n self.startx = self.starty = 0\n self.endx = self.endy = 512\n getwcs = True\n else:\n try:\n ifname = hdr['FILENAME']\n except KeyError:\n raise RemFitsErr(\"No internal filename in FITS header\")\n\n mtches = fmtch.match(ifname)\n if mtches is None:\n if self.filter is None:\n raise RemFitsErr(\"No filter given and no decipherable filename\")\n try:\n self.ftype = ftypes[ifname[0]][0]\n except KeyError:\n self.ftype = 'Processed image'\n getwcs = True\n else:\n ft, quad = mtches.groups()\n hfilt = filtfn[quad]\n try:\n self.ftype, getwcs = ftypes[ft]\n except KeyError:\n self.ftype = 'Processed image'\n getwcs = True\n if self.filter is None:\n self.filter = hfilt\n elif hfilt != self.filter:\n raise RemFitsErr(\"Conflig on filter types between \" + self.filter + \" and internal filename \" + ifname + \" suggesting \" + hfilt)\n\n self.description = self.ftype + \" dated \" + dfmtd\n\n try:\n self.startx = hdr['startX']\n self.starty = hdr['startY']\n self.endx = hdr['endX']\n self.endy = hdr['endY']\n self.ncolumns = self.endx - self.startx\n self.nrows = self.endy - self.starty\n except KeyError:\n warnings.warn(\"Had to insert geometry\", UserWarning, stacklevel=5)\n self.startx, self.starty, self.ncolumns, self.nrows = remdefaults.get_geom(self.date, self.filter)\n self.endx = self.startx + self.ncolumns\n self.endy = self.starty + self.nrows\n\n if self.startx >= 1024:\n if self.filter not in 'gr':\n raise RemFitsErr(\"Filter \" + self.filter + \" not expected to be on right of CCD\")\n else:\n if self.filter not in 'iz':\n raise RemFitsErr(\"Filter \" + self.filter + \" not expected to be on left of CCD\")\n if self.starty >= 1024:\n if self.filter not in 'gi':\n raise RemFitsErr(\"Filter \" + self.filter + \" not expected to be on top of CCD\")\n else:\n if self.filter not in 'rz':\n raise RemFitsErr(\"Filter \" + self.filter + \" not expected to be on bottom of CCD\")\n\n if getwcs:\n self.wcs = wcscoord.wcscoord(hdr)", "def make_t(self):\n self.img[1, 1:-1] = 1\n self.img[2:-1, self.l_i / 2] = 1\n self.img_name = 'T'", "def model_4_parameters(num_features, num_classes, image_info):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n if image_info['key'][:5] == \"pavia\":\n parameters['C'] = 1.0\n else:\n parameters['C'] = 40.0\n \n return parameters", "def new_param(ini_file='new_param.ini'):\n ### READ THE INI FILE ###\n config.read(ini_file)\n print 'Read the file ',ini_file\n ##~~~~~~ file_in ~~~~~~##\n file_in=config.get('file_in','file_in')\n ##~~~~~~ file_out ~~~~~~##\n file_out=config.get('file_out','file_out')\n\n ##~~~~~~ factor_values ~~~~~##\n fac_L=config.getfloat('factor_values','fac_L')\n fac_Ks=config.getfloat('factor_values','fac_KS')\n fac_n_o=config.getfloat('factor_values','fac_n_o')\n fac_n_c=config.getfloat('factor_values','fac_n_c')\n\n ##~~~~~~ new_initial_values ~~~~~##\n 
new_pVs_t0=config.getfloat('new_initial_values','new_pVs_t0')\n new_Vo_t0=config.getfloat('new_initial_values','new_Vo_t0')\n new_Qc_t0=config.getfloat('new_initial_values','new_Qc_t0')\n\n ##~~~~~~ flags ~~~~~~##\n nb_param=config.getfloat('flags','nb_param')\n\n #Reading of parameter file\n print 'Reading parameter file'\n ar_cell_label,ar_coorx,ar_coory,ar_lambda,ar_Xc,ar_dam,ar_tan_beta,ar_tan_beta_channel,ar_L,ar_Ks,\\\n ar_theta_r,ar_theta_s,ar_n_o,ar_n_c,\\\n ar_cell_down,ar_pVs_t0,ar_Vo_t0,ar_Qc_t0,ar_kc\\\n =pm.read_cell_parameters(file_in)\n\n #~~~~~~Change in parameters~~~~~~#\n #Multiplying factors for L, Ks, n_o and n_c\n if fac_L!=1.:\n print 'Change L'\n ar_L=ar_L*fac_L\n if fac_Ks!=1.:\n print 'Change Ks'\n ar_Ks=ar_Ks*fac_Ks\n if fac_n_o!=1.:\n print 'Change n_o'\n ar_n_o=ar_n_o*fac_n_o\n if fac_n_c!=1.:\n print 'Change n_c'\n ar_n_c=ar_n_c*fac_n_c\n #New values for pVs_t0, Vo_t0 and Qc_t0\n if new_pVs_t0!=ar_pVs_t0[0]:\n print 'Change pVs_t0'\n ar_pVs_t0=ar_pVs_t0*0.+new_pVs_t0\n if new_Vo_t0!=ar_Vo_t0[0]:\n print 'Change pVs_t0'\n ar_Vo_t0=ar_Vo_t0*0.+new_Vo_t0\n if new_Qc_t0!=ar_Qc_t0[0]:\n print 'Change pVc_t0'\n ar_Qc_t0=ar_Qc_t0*0.+new_Qc_t0\n\n #~~~~~~Write parameter file~~~~~~#\n tab_param=np.zeros((len(ar_cell_label),nb_param))\n tab_param[:,0]=ar_cell_label\n tab_param[:,1]=ar_coorx\n tab_param[:,2]=ar_coory\n tab_param[:,3]=ar_lambda\n tab_param[:,4]=ar_Xc\n tab_param[:,5]=ar_dam\n tab_param[:,6]=ar_tan_beta\n tab_param[:,7]=ar_tan_beta_channel\n tab_param[:,8]=ar_L\n tab_param[:,9]=ar_Ks\n tab_param[:,10]=ar_theta_r\n tab_param[:,11]=ar_theta_s\n tab_param[:,12]=ar_n_o\n tab_param[:,13]=ar_n_c\n tab_param[:,14]=ar_cell_down\n tab_param[:,15]=ar_pVs_t0\n tab_param[:,16]=ar_Vo_t0\n tab_param[:,17]=ar_Qc_t0\n tab_param[:,18]=ar_kc\n\n np.savetxt(file_out, tab_param)", "def get_lamp_parameters(p, header, filename=None, kind=None):\n\n func_name = __NAME__ + '.get_lamp_parameters()'\n # get relevant (cass/ref) fiber position (for lamp identification)\n gkwargs = dict(return_value=True, dtype=str)\n if p['FIBER'] == 'C':\n p['FIB_POS'] = spirouImage.ReadParam(p, header, 'kw_CREF', **gkwargs)\n p['FIB_POS_ID'] = p['kw_CREF'][0]\n elif p['FIBER'] in ['AB', 'A', 'B']:\n p['FIB_POS'] = spirouImage.ReadParam(p, header, 'kw_CCAS', **gkwargs)\n p['FIB_POS_ID'] = p['kw_CCAS'][0]\n else:\n emsg1 = ('Fiber position cannot be identified for fiber={0}'\n .format(p['FIB_TYP']))\n emsg2 = ' function={0}'.format(__NAME__)\n WLOG(p, 'error', [emsg1, emsg2])\n # set the source of fib_pos\n p.set_sources(['FIB_POS', 'FIB_POS_ID'], func_name)\n\n # identify lamp\n if kind is not None:\n lamp = kind\n elif filename is not None:\n lamp = decide_on_lamp_type(p, filename=filename)\n else:\n lamp = decide_on_lamp_type(p, filename=p['ARG_FILE_NAMES'][0])\n\n # -------------------------------------------------------------------------\n # Now set parameters in p based on lamp type\n\n # the lamp type\n p['LAMP_TYPE'] = lamp\n p.set_source('LAMP_TYPE', func_name)\n # the lamp file\n p['IC_LL_LINE_FILE'] = p['IC_LL_LINE_FILE_ALL'][lamp]\n p.set_source('IC_LL_LINE_FILE', func_name)\n # the lamp cat type\n p['IC_CAT_TYPE'] = p['IC_CAT_TYPE_ALL'][lamp]\n p.set_source('IC_CAT_TYPE', func_name)\n # -------------------------------------------------------------------------\n # finally return p\n return p", "def set_dims_in_hdr(hdr, startx, starty, cols, rows):\n hdr['startX'] = (startx, 'Starting CCD pixel column')\n hdr['endX'] = (startx + cols, 'Ending CCD pixel column+1')\n hdr['startY'] = (starty, 
'Starting CCD pixel row')\n hdr['endY'] = (starty + rows, 'Ending CCD pixel row+1')", "def initializeC(self, image):\n super(CPupilFit, self).initializeC(image)\n\n self.mfit = self.clib.pfitInitialize(self.pupil_fn.getCPointer(),\n self.rqe,\n self.scmos_cal,\n self.default_tol,\n self.scmos_cal.shape[1],\n self.scmos_cal.shape[0])\n self.clib.pfitSetZRange(self.mfit,\n self.pupil_fn.getZMin(),\n self.pupil_fn.getZMax())", "def __setattr__(self, item, value):\n if item in ('header', 'lines', 'mag', 'z', 'cubes', 'images',\n 'spectra', 'tables', '_logger', '_filename',\n '_default_size', 'default_size'):\n super(Source, self).__setattr__(item, value)\n else:\n self.header[item] = value", "def createMfile(dHeader):\n\tif specParamsOK(dHeader):\n createMatlabScript(dHeader)\n else:\n raise 'spec params error'", "def preprocess(self):\n\n mm_magcoord.add_aacgm_coordinates(self)\n mm_magcoord.add_quasi_dipole_coordinates(self)\n mm_sc.calculate_ecef_velocity(self)\n mm_sc.add_ram_pointing_sc_attitude_vectors(self)\n\n return", "def set_raw_trf(self, head, body, line):\r\n self.trf_param = 0\r\n self.trf_head = trf_parse_head(head).strip()\r\n self.trf_gi = parse_fasta_head(self.trf_head)[0]\r\n self.trf_chr = parse_chromosome_name(self.trf_head)\r\n\r\n (self.trf_l_ind,\r\n self.trf_r_ind,\r\n self.trf_period,\r\n self.trf_n_copy,\r\n self.trf_l_cons,\r\n self.trf_pmatch,\r\n self.trf_indels,\r\n self.trf_score,\r\n self.trf_n_a,\r\n self.trf_n_c,\r\n self.trf_n_g,\r\n self.trf_n_t,\r\n self.trf_entropy,\r\n self.trf_consensus,\r\n self.trf_array) = trf_parse_line(line)\r\n\r\n self.trf_pvar = int(100 - float(self.trf_pmatch))\r\n\r\n try:\r\n self.trf_l_ind = int(self.trf_l_ind)\r\n except:\r\n print(self)\r\n\r\n self.trf_r_ind = int(self.trf_r_ind)\r\n self.trf_period = int(self.trf_period)\r\n self.trf_n_copy = float(self.trf_n_copy)\r\n\r\n self.trf_consensus = clear_sequence(self.trf_consensus)\r\n self.trf_array = clear_sequence(self.trf_array)\r\n\r\n self.trf_array_gc = get_gc(self.trf_array)\r\n self.trf_consensus_gc = get_gc(self.trf_consensus)\r\n self.trf_chr = parse_chromosome_name(self.trf_head)\r\n self.trf_array_length = len(self.trf_array)", "def set_file(self, filename):\n # TODO: check the existence of extensions\n # Open fits files\n hdulist = fits.open(filename)\n # TODO: don't hardcode extension numbers and names here ... 
pass on from gp-cwt\n self.set_data(hdulist[0].data, hdulist['NormOffMap'].data)\n self.header = hdulist[0].header\n self.wcs = WCS(self.header)", "def preprocess_image(filename, side='blue', flatcor = 'yes', \r\n remove_cosmics=True, trace=None):\r\n\r\n assert(flatcor in ['yes', 'no'])\r\n\r\n if trace is None:\r\n trace = det_pars[side]['trace']\r\n\r\n # Need to define an instrument translation file in iraf 2.16.1\r\n iraf.unlearn('setinst')\r\n iraf.setinst.instrument = 'kpnoheaders'\r\n iraf.setinst.review = 'no'\r\n iraf.setinst.mode = 'h'\r\n iraf.setinst()\r\n\r\n # bias subtraction using the overscan\r\n hdr = pyfits.getheader(filename)\r\n iraf.unlearn('ccdproc')\r\n iraf.ccdproc.zerocor = \"no\"\r\n iraf.ccdproc.flatcor = flatcor\r\n iraf.ccdproc.fixpix = \"no\"\r\n iraf.hedit(filename, 'GAIN', det_pars[side]['gain'], \r\n update=\"yes\", verify=\"no\", show=\"no\")\r\n iraf.hedit(filename, 'RON', det_pars[side]['readnoise'], \r\n update=\"yes\", verify=\"no\", show=\"no\")\r\n if side == 'blue':\r\n # update the header\r\n iraf.hedit(filename, 'DISPAXIS', 2, update=\"yes\", verify=\"no\", add=\"yes\", show=\"no\")\r\n # trim the specified region\r\n iraf.ccdproc.biassec = hdr['BSEC1']\r\n iraf.ccdproc.trimsec = \"[%d:%d,*]\" % (trace-100, trace+100)\r\n iraf.ccdproc.function = \"spline3\"\r\n iraf.ccdproc.order = 3\r\n else:\r\n # update the header\r\n iraf.hedit(filename, 'DISPAXIS', 1, update=\"yes\", verify=\"no\", add=\"yes\", show='no')\r\n # trim the specified region\r\n iraf.ccdproc.biassec = det_pars['red']['biassec']\r\n tsec_x = hdr['TSEC1'].split(',')[0]\r\n iraf.ccdproc.trimsec = tsec_x + \",%d:%d]\" % (trace-100, trace+100)\r\n iraf.ccdproc.function = \"legendre\"\r\n iraf.ccdproc.order = 1\r\n iraf.ccdproc.ccdtype = \"\"\r\n iraf.ccdproc.darkcor = \"no\"\r\n iraf.ccdproc.niterate = 3\r\n iraf.ccdproc(filename,\r\n flat=\"flat_%s_%s\" % (side, hdr['APERTURE']))\r\n\r\n if (side == 'blue') and ('FIXPIX' not in hdr):\r\n iraf.fixpix('blue????.fits', \"bluebpm\")\r\n\r\n if 'OBSERVAT' not in hdr:\r\n # update the headers\r\n iraf.asthedit(filename, BASE_DIR + '/cal/DBSP.hdr')\r\n\r\n # remove cosmic rays with LA Cosmic\r\n if remove_cosmics and ('COSMIC' not in hdr) and (hdr['EXPTIME'] > 60) and \\\r\n (hdr['TURRET'] == 'APERTURE'):\r\n array, header = pyfits.getdata(filename, header=True)\r\n c = cosmics.cosmicsimage(array, gain=det_pars[side]['gain'], \r\n readnoise=det_pars[side]['readnoise'], \r\n sigclip = 4.5, sigfrac = 0.5, objlim = 2.0, satlevel=60000,\r\n skyOrder = 0, objectOrder = 0)\r\n c.run(maxiter = 3)\r\n #header.update('COSMIC', 1, '1 if we ran LA Cosmic')\r\n header['COSMIC']= 1\r\n pyfits.writeto(filename, c.cleanarray, header, clobber=True)", "def edit_incar(param_label, i, dir, line_key = 'ENCUT', file = 'INCAR'):\n\n replacement_line = \" ENCUT = \" + i[0] + \" ! 
Plane-wave cutoff\"\n gen_file_editor(param_label, dir, file, replacement_line, line_key)\n\n return False", "def __init__(self, calc_id, particle_name, xp_partition):\n tdc_FMCI_XP_Data_Base.__init__(self)\n # name and calc_id\n self.name = particle_name\n self.calc_id = calc_id\n # setup XP_Data --------------------\n sample_dict = dict(name='regular', n_reduce=1, n_min=1)\n self.xp = tdc_XP_Data(calc_id, particle_name, sample_dict, get_weight=True)\n # interface to timetable -----------\n self.timetable = self.xp.timetable\n # setup properties -----------------\n setup_props = tdc_Setup_Props(calc_id)\n # normalization parameters\n self.W0 = setup_props.get_papam('FMPProps/W0')\n self.L = setup_props.get_papam('/GridProps/L')\n # physical parameters from \"setup_properties.h5\"\n self.PSR_P = setup_props.get_papam('/PulsarGapProps/P')\n self.PSR_B12 = setup_props.get_papam('/PulsarGapProps/B_12')\n self.PSR_Lcm = setup_props.get_papam('/GridProps/L_cm')\n # physical parameters from \"cascade.input\": THETA and CHI\n infile=AT.FileInput()\n infile.ReadFile(tdc_Filenames.get_full_filename(calc_id, 'cascade.input'))\n infile.ChangeGroup('GEOMETRY')\n self.PSR_Theta = infile.get_param('THETA')\n infile.ChangeGroup() \n infile.ChangeGroup('DIMENSIONAL_CONSTANTS::PSR_ConstsInitializer')\n self.PSR_Chi = infile.get_param('CHI')\n infile.ChangeGroup() \n # set xp_partition =================\n self.set_xp_partition(xp_partition)", "def generate_modelSED_photo_fit(sp=None,sfh_form=4,filters=None,add_igm_absorption=0,igm_type=0,params_fsps=None,DL_Gpc=0.0,cosmo='flat_LCDM',\n\tH0=70.0,Om0=0.3,params_val=None,interp_filters_waves=[],interp_filters_trans=[]):\n\n\tdef_params_fsps, params_assoc_fsps, status_log = list_params_fsps()\n\n\tformed_mass = pow(10.0,params_val['log_mass'])\n\n\t# input model parameters to FSPS:\n\tfor pp in range(len(params_fsps)):\n\t\tstr_temp = params_assoc_fsps[params_fsps[pp]]\n\t\tif status_log[params_fsps[pp]] == 0:\n\t\t\tsp.params[str_temp] = params_val[params_fsps[pp]]\n\t\telif status_log[params_fsps[pp]] == 1:\n\t\t\tsp.params[str_temp] = pow(10.0,params_val[params_fsps[pp]])\n\n\t# generate the SED:\n\tif sfh_form==0 or sfh_form==1:\n\t\tage = pow(10.0,params_val['log_age'])\n\t\twave, extnc_spec = sp.get_spectrum(peraa=True,tage=age) ## spectrum in L_sun/AA\n\t\tmass = sp.stellar_mass\n\t\tdust_mass0 = sp.dust_mass ## in solar mass/norm\n\telif sfh_form==2 or sfh_form==3 or sfh_form==4:\n\t\tt0 = pow(10.0,params_val['log_t0'])\n\t\ttau = pow(10.0,params_val['log_tau'])\n\t\tage = pow(10.0,params_val['log_age'])\n\t\talpha = pow(10.0,params_val['log_alpha'])\n\t\tbeta = pow(10.0,params_val['log_beta'])\n\t\tSFR_fSM,mass,wave,extnc_spec,dust_mass0 = csp_spec_restframe_fit(sp=sp,sfh_form=sfh_form,formed_mass=formed_mass,age=age,tau=tau,t0=t0,alpha=alpha,beta=beta)\n\n\t# redshifting\n\tredsh_wave,redsh_spec0 = cosmo_redshifting(DL_Gpc=DL_Gpc,cosmo=cosmo,H0=H0,Om0=Om0,z=params_val['z'],wave=wave,spec=extnc_spec)\n\n\t# IGM absorption:\n\tif add_igm_absorption == 1:\n\t\tif igm_type == 0:\n\t\t\ttrans = igm_att_madau(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\t\telif igm_type == 1:\n\t\t\ttrans = igm_att_inoue(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\n\t# normalize:\n\tnorm0 = formed_mass/mass\n\tredsh_spec = redsh_spec0*norm0\n\tdust_mass = dust_mass0*norm0\n\n\t# filtering:\n\tphoto_SED_flux = 
filtering_interp_filters(redsh_wave,redsh_spec,interp_filters_waves,interp_filters_trans)\n\n\treturn photo_SED_flux", "def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. / Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. 
Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. 
The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm = wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # 
#https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. 
The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")", "def writePOVRAYHeader(self, fh):\n settings = self.mainWindow.preferences.povrayForm\n\n focalPoint = self.camera.GetFocalPoint()\n campos = self.camera.GetPosition()\n viewup = self.camera.GetViewUp()\n angle = settings.viewAngle\n if settings.shadowless:\n shadowless = \"shadowless \"\n else:\n shadowless = \"\"\n\n if self.parent.blackBackground:\n rval = gval = bval = 0\n else:\n rval = gval = bval = 1\n\n fh.write(\"camera { perspective location <%f,%f,%f>\\n\" % (- campos[0], campos[1], campos[2]))\n fh.write(\" look_at <%f,%f,%f>\\n\" % (- focalPoint[0], focalPoint[1], focalPoint[2]))\n fh.write(\" angle %f\\n\" % angle)\n fh.write(\" sky <%f,%f,%f> }\\n\" % (- viewup[0], viewup[1], viewup[2]))\n fh.write(\"light_source { <%f,%f,%f> color rgb <1,1,1> %s }\\n\" % (- campos[0], campos[1], campos[2], shadowless))\n fh.write(\"background { color rgb <%f,%f,%f> }\\n\" % (rval, gval, bval))", "def setMultiLabelValuesFromCP(self, cp):\n self.setInternalLabelSet(getattr(cp, \"internallabels\", None))\n self.setIngressLabelSet (getattr(cp, \"ingresslabels\", None))\n self.setEgressLabelSet (getattr(cp, \"egresslabels\", None))", "def edit_header(my_vcf):\n header = my_vcf.header.copy()\n header.add_line(('##INFO=<ID=GTCNT,Number=.,Type=Integer,'\n 'Description=\"Counts of genotypes for the allele (UNK, REF, HET, HOM)\">'))\n return header", "def add_pv_keywords(header, sipx, sipy, pvrange, tpvx, tpvy, tpv=True):\n for p in pvrange:\n val = float(calcpv(pvrange, 1, p, sipx, sipy, tpvx, tpvy).evalf())\n if val != 0.0:\n header['PV1_%d' % p] = val\n for p in pvrange:\n val = float(calcpv(pvrange, 2, p, sipx, sipy, tpvx, tpvy).evalf())\n if val != 0.0:\n header['PV2_%d' % p] = val\n if tpv:\n header['CTYPE1'] = 'RA---TPV'\n header['CTYPE2'] = 'DEC--TPV'\n else:\n header['CTYPE1'] = header['CTYPE1'][:8]\n header['CTYPE2'] = header['CTYPE2'][:8]\n return", "def coadd(hdr_list, data_list, var_list, exp_list,\n method='mean', weighted=True, robust=True, sigma=8.0,\n maxiters=5, spectral=False, cube=False, wcskey=' ',\n rotate=True, fit_order=2, window=7.0, smoothing=2.0,\n adaptive_algorithm=None, edge_threshold=0.7,\n reference='first'):\n\n # cube is only supported for spectral data\n if cube:\n spectral = True\n\n # reference all data to the first file\n out_header = hdr_list[0].copy()\n\n # set reference angle to zero if it isn't already\n key = wcskey.strip().upper()\n if rotate:\n for wkey in [f'CROTA2{key}',\n f'PC1_1{key}', f'PC1_2{key}',\n f'PC2_1{key}',\n f'PC2_2{key}', f'PC2_3{key}',\n f'PC3_2{key}', f'PC3_3{key}']:\n if wkey in out_header:\n if wkey == f'CROTA2{key}':\n out_header[wkey] = 0.0\n else:\n del out_header[wkey]\n\n # swap RA to east-left if needed\n ra = f'CDELT1{key}'\n if not cube and ra in out_header and out_header[ra] > 0:\n out_header[ra] *= -1\n\n # turn down logging to avoid FITS warning for 3D coord sys\n olevel = log.level\n log.setLevel('ERROR')\n if not spectral:\n outwcs = WCS(out_header, key=wcskey, naxis=2)\n else:\n outwcs = WCS(out_header, key=wcskey)\n log.setLevel(olevel)\n\n wcs_dim = outwcs.wcs.naxis\n if cube and wcs_dim < 3:\n msg = 'WCS is not 3D. Cannot make cube.'\n log.error(msg)\n raise ValueError(msg)\n\n if cube:\n # expectation is that 3D coord was in a secondary WCS --\n # we don't handle it if not\n if key == '':\n log.error('Unexpected input WCS condition. 
'\n 'Cannot fix output header.')\n raise ValueError\n\n method = 'resample'\n if 'SLTW_PIX' not in out_header:\n log.warning('Slit width not in header; output flux '\n 'may not be conserved.')\n float_slitw = out_header.get('SLTW_PIX', 1.0)\n slit_width = int(np.round(float_slitw))\n else:\n float_slitw = 1.0\n slit_width = 1\n\n # if referencing to a target RA/Dec (e.g. for nonsidereal targets),\n # get the target position in reference x, y coordinates\n tgt_x, tgt_y = None, None\n if reference == 'target':\n tgt_x, tgt_y = _target_xy(out_header, outwcs)\n if None in (tgt_x, tgt_y):\n msg = 'Missing TGTRA or TGTDEC; cannot reference to target.'\n log.warning(msg)\n\n out_coord_x = []\n out_coord_y = []\n out_coord_w = []\n flxvals = []\n errvals = []\n expvals = []\n corners = []\n for (hdr, flux, var, exp) in zip(hdr_list, data_list, var_list, exp_list):\n # input wcs\n if not spectral:\n inwcs = WCS(hdr, key=wcskey, naxis=2)\n else:\n inwcs = WCS(hdr, key=wcskey)\n\n # assemble flux, error, and exposure map values\n ny, nx = flux.shape\n err = np.sqrt(var)\n good = ~np.isnan(flux) & ~np.isnan(err)\n if not np.any(good):\n log.warning(f\"No good data in \"\n f\"{hdr.get('FILENAME', 'UNKNOWN')}; skipping.\")\n continue\n if method == 'resample':\n flxvals.append(flux[good])\n errvals.append(err[good])\n else:\n flxvals.append(flux)\n errvals.append(err)\n if cube:\n # exposure value is at one wavelength only, with\n # slit width size, plus two zero columns for padding\n expval = exp[:, 0:slit_width + 2]\n expval[:, 0] = 0\n expval[:, -1] = 0\n expvals.append(expval)\n else:\n expvals.append(exp)\n\n # index values for resampling\n yin, xin = np.meshgrid(np.arange(ny), np.arange(nx), indexing='ij')\n yin = yin[good]\n xin = xin[good]\n xamin, xamax = np.argmin(xin), np.argmax(xin)\n yamin, yamax = np.argmin(yin), np.argmax(yin)\n\n # corner values for interpolation\n if cube:\n in_corner = [[xin[xamin], xin[xamin],\n xin[xamax], xin[xamax]],\n [yin[yamin], yin[yamax],\n yin[yamin], yin[yamax]],\n [-slit_width / 2 + 0.5, -slit_width / 2 + 0.5,\n slit_width / 2 - 0.5, slit_width / 2 - 0.5]]\n else:\n in_corner = [[xin[xamin], xin[xamin],\n xin[xamax], xin[xamax]],\n [yin[yamin], yin[yamax],\n yin[yamin], yin[yamax]]]\n\n # transform all coords to reference WCS\n if wcs_dim == 2:\n wxy = inwcs.wcs_pix2world(xin, yin, 0)\n oxy = outwcs.wcs_world2pix(*wxy, 0)\n cxy = inwcs.wcs_pix2world(*in_corner, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)\n else:\n wxy = inwcs.wcs_pix2world(xin, yin, 0, 0)\n oxy = outwcs.wcs_world2pix(*wxy, 0)\n if cube:\n cxy = inwcs.wcs_pix2world(*in_corner, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)\n # ra, dec corners\n in_corner = [in_corner[2], in_corner[1]]\n # correct for slit width offset in not-yet\n # existant 3rd dimension\n out_corner = np.array([out_corner[2] - slit_width / 2,\n out_corner[1]])\n else:\n cxy = inwcs.wcs_pix2world(*in_corner, 0, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)[0:2]\n\n # correct all coordinates for target movement\n x_off, y_off = 0., 0.\n if None not in [tgt_x, tgt_y]:\n upd_x, upd_y = _target_xy(hdr, outwcs)\n if None in [upd_x, upd_y]:\n log.warning(f\"Missing target RA/Dec in file \"\n f\"{hdr.get('FILENAME', 'UNKNOWN')}.\")\n else:\n x_off = tgt_x - upd_x\n y_off = tgt_y - upd_y\n\n if cube and wcs_dim == 3:\n # assuming crval1=wavelength, crval2=dec, crval3=ra\n out_coord_w.append(oxy[0])\n out_coord_y.append(oxy[1] + y_off)\n out_coord_x.append(oxy[2] + x_off)\n else:\n out_coord_x.append(oxy[0] + x_off)\n 
out_coord_y.append(oxy[1] + y_off)\n\n out_corner[0] += x_off\n out_corner[1] += y_off\n corners.append((in_corner, out_corner))\n\n # output grid shape\n stk_coord_x = np.hstack(out_coord_x)\n minx, maxx = np.min(stk_coord_x), np.max(stk_coord_x)\n stk_coord_y = np.hstack(out_coord_y)\n miny, maxy = np.min(stk_coord_y), np.max(stk_coord_y)\n\n # shift coordinates to new grid\n stk_coord_x -= minx\n stk_coord_y -= miny\n\n # stack coordinates for output grid\n if cube:\n stk_coord_w = np.hstack(out_coord_w)\n minw, maxw = np.min(stk_coord_w), np.max(stk_coord_w)\n out_shape = (int(np.ceil(maxw) - np.floor(minw) + 1),\n int(np.ceil(maxy) - np.floor(miny) + 1),\n int(np.ceil(maxx) - np.floor(minx)) + 1)\n stk_coord_w -= minw\n coordinates = stack(stk_coord_x, stk_coord_y, stk_coord_w)\n\n xout = np.arange(out_shape[2], dtype=np.float64)\n yout = np.arange(out_shape[1], dtype=np.float64)\n wout = np.arange(out_shape[0], dtype=np.float64)\n grid = xout, yout, wout\n\n # fix header reference pixel for new min value in w and x\n out_header['CRPIX1' + key] -= minw\n out_header['CRPIX2' + key] -= miny\n out_header['CRPIX3' + key] -= minx\n else:\n out_shape = (int(np.ceil(maxy) - np.floor(miny) + 1),\n int(np.ceil(maxx) - np.floor(minx)) + 1)\n\n coordinates = stack(stk_coord_x, stk_coord_y)\n\n xout = np.arange(out_shape[1], dtype=np.float64)\n yout = np.arange(out_shape[0], dtype=np.float64)\n grid = xout, yout\n\n # fix header reference pixel\n out_header['CRPIX1' + key] -= minx\n out_header['CRPIX2' + key] -= miny\n\n # also fix primary coordinates for 2D spectrum\n if key != '' and wcs_dim > 2:\n out_header['CRPIX1'] -= minx\n out_header['CRPIX2'] -= miny\n\n log.info('Output shape: {}'.format(out_shape))\n\n # use local polynomial fits to resample and coadd data\n if method == 'resample':\n flxvals = np.hstack(flxvals)\n errvals = np.hstack(errvals)\n\n if cube:\n edge_threshold = (edge_threshold, edge_threshold, 0)\n window = (window, window, 2.0)\n smoothing = (smoothing, smoothing, 1.0)\n if adaptive_algorithm in ['scaled', 'shaped']:\n adaptive_threshold = (1.0, 1.0, 0.0)\n else:\n adaptive_threshold = None\n adaptive_algorithm = None\n else:\n if adaptive_algorithm in ['scaled', 'shaped']:\n adaptive_threshold = 1.0\n else:\n adaptive_threshold = None\n adaptive_algorithm = None\n\n max_cores = psutil.cpu_count() - 1\n if max_cores < 2: # pragma: no cover\n max_cores = None\n\n log.info('Setting up output grid.')\n resampler = Resample(\n coordinates, flxvals, error=errvals,\n window=window, order=fit_order, fix_order=True)\n\n log.info('Resampling flux data.')\n flux, std = resampler(\n *grid, smoothing=smoothing, edge_threshold=edge_threshold,\n adaptive_threshold=adaptive_threshold,\n adaptive_algorithm=adaptive_algorithm,\n edge_algorithm='distribution', get_error=True,\n error_weighting=weighted, jobs=max_cores)\n var = std**2\n\n log.info('Interpolating and summing exposure maps.')\n if cube:\n expmap = np.zeros(out_shape[1:], dtype=float)\n else:\n expmap = np.zeros(out_shape, dtype=float)\n for i, expval in enumerate(expvals):\n inx, iny = corners[i][0]\n outx, outy = corners[i][1]\n outx -= minx\n outy -= miny\n exp_out = warp_image(\n expval, inx, iny, outx, outy,\n output_shape=expmap.shape, cval=0,\n order=1, interpolation_order=1)\n expmap += exp_out\n else:\n # interpolate corners for approximate warp solution\n log.info('Interpolating all images.')\n\n flx = []\n vr = []\n expmap = np.zeros(out_shape)\n for i, (flxval, errval, expval) in \\\n enumerate(zip(flxvals, 
errvals, expvals)):\n inx, iny = corners[i][0]\n outx, outy = corners[i][1]\n outx -= minx\n outy -= miny\n\n # flux image\n flx.append(\n warp_image(flxval, inx, iny, outx, outy,\n output_shape=out_shape, cval=np.nan,\n order=1, interpolation_order=1))\n\n # var image\n vr.append(\n warp_image(errval**2, inx, iny, outx, outy,\n output_shape=out_shape, cval=np.nan,\n order=1, interpolation_order=0))\n\n # exposure map image\n exp_out = warp_image(\n expval, inx, iny, outx, outy,\n output_shape=out_shape, cval=0,\n order=1, interpolation_order=0)\n expmap += exp_out\n\n if len(flx) > 1:\n log.info('{}-combining images.'.format(method.title()))\n flux, var = combine_images(\n flx, variance=vr, method=method, weighted=weighted,\n robust=robust, sigma=sigma, maxiters=maxiters)\n else:\n flux, var = flx[0], vr[0]\n\n if cube:\n # reconstruct as primary wcs\n key = wcskey.strip().upper()\n wcs_key_set = ['CTYPE1', 'CTYPE2', 'CUNIT1', 'CUNIT2',\n 'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2',\n 'CDELT1', 'CDELT2', 'CROTA2', 'SPECSYS',\n f'CTYPE1{key}', f'CTYPE2{key}', f'CTYPE3{key}',\n f'CUNIT1{key}', f'CUNIT2{key}', f'CUNIT3{key}',\n f'CRPIX1{key}', f'CRPIX2{key}', f'CRPIX3{key}',\n f'CRVAL1{key}', f'CRVAL2{key}', f'CRVAL3{key}',\n f'CDELT1{key}', f'CDELT2{key}', f'CDELT3{key}',\n f'RADESYS{key}', f'EQUINOX{key}', f'SPECSYS{key}']\n tmp = out_header.copy()\n for wkey in wcs_key_set:\n if wkey in out_header:\n del out_header[wkey]\n if wkey.endswith(key) and wkey in tmp:\n # swap coords 1 and 3 (to make it wave, RA, Dec)\n new_key = wkey[:-1].replace('3', '9999')\n new_key = new_key.replace('1', '3').replace('9999', '1')\n hdinsert(out_header, new_key, tmp[wkey], tmp.comments[wkey])\n\n # fix source position estimate too\n if 'SRCPOSX' in out_header and 'SRCPOSY' in out_header:\n coord = ([out_header['SRCPOSX']],\n [out_header['SRCPOSY']])\n first_wcs = WCS(hdr_list[0], naxis=2)\n out_wcs = WCS(out_header, naxis=2)\n sxy = first_wcs.wcs_pix2world(*coord, 0)\n new_xy = out_wcs.wcs_world2pix(*sxy, 0)\n out_header['SRCPOSX'] = new_xy[0][0]\n out_header['SRCPOSY'] = new_xy[1][0]\n\n if cube:\n # correct flux for pixel size change\n # before: pixel x slit width in pixels\n # after: pixel x pixel\n flux /= float_slitw\n var /= float_slitw**2\n\n return out_header, flux, var, expmap", "def __init__(self, coefficient, basefield=None):\n\n # parameter parse\n try:\n character = basefield.getCharacteristic()\n field = basefield\n except AttributeError:\n # backward compatibility\n if isinstance(basefield, int):\n field = finitefield.FinitePrimeField.getInstance(basefield)\n character = basefield\n else:\n raise ValueError(\"basefield must be FiniteField object.\")\n\n coeffs_list = []\n if isinstance(coefficient, list):\n for c in coefficient:\n if isinstance(c, int):\n coeff = field.createElement(c)\n elif c in field:\n coeff = c\n else:\n raise ValueError(\"coefficient not in basefield.\")\n coeffs_list.append(coeff)\n\n # general initialize\n ECGeneric.__init__(self, coeffs_list, field)\n\n zero = self.basefield.zero\n one = self.basefield.one\n\n # format attribute\n if self.ch == 2:\n if len(self) == 5:\n # FIXME\n if coeffs_list[0] % 2 == one and coeffs_list[2] % 2 == coeffs_list[3] % 2 == zero and coeffs_list[4]:\n self.a1 = one\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = one\n self.b4 = zero\n self.b6 = zero\n self.b8 = self.a6\n self.c4 = one\n self.c6 = one\n self.disc = self.a6\n self.j = self.disc.inverse()\n elif coeffs_list[0] % 2 == 
coeffs_list[1] % 2 == zero and coeffs_list[2]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = zero\n self.b6 = self.a3**2\n self.b8 = self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = self.a3**4\n self.j = zero\n else:\n raise ValueError(\"coefficient may be not representation of EC.\")\n else:\n raise ValueError(\"coefficient may only use full Weierstrass form for characteristic 2.\")\n elif self.ch == 3: # y^2=x^3+a2*x^2+a6 or y^2=x^3+a4*x+a6\n # FIXME\n if len(self) == 5:\n if coeffs_list[0] % 3 == coeffs_list[2] % 3 == coeffs_list[3] % 3 == 0 and coeffs_list[1] and coeffs_list[4]:\n self.a1 = zero\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = self.a2\n self.b4 = zero\n self.b6 = self.a6\n self.b8 = self.a2*self.a6\n self.c4 = self.b2**2\n self.c6 = 2*self.b2**3\n self.disc = -self.a2**3*self.a6\n self.j = (-self.a2**3)*self.a6.inverse()\n elif coeffs_list[0] == coeffs_list[1] == coeffs_list[2] == 0 and coeffs_list[3]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = 2*self.a4\n self.b6 = self.a6\n self.b8 = 2*self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = -self.a4**3\n self.j = zero\n else:\n raise ValueError(\"can't defined EC.\")\n if not self.disc:\n raise ValueError(\"this curve is singular.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n else:\n if len(self) == 5:\n self.a1 = coeffs_list[0]\n self.a2 = coeffs_list[1]\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = self.a1**2+4*self.a2\n self.b4 = self.a1*self.a3+2*self.a4\n self.b6 = self.a3**2+4*self.a6\n self.b8 = self.a1**2*self.a6+4*self.a2*self.a6-self.a1*self.a3*self.a4+self.a2*self.a3**2-self.a4**2\n self.c4 = self.b2**2-24*self.b4\n self.c6 = -self.b2**3+36*self.b2*self.b4-216*self.b6\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n elif len(self) == 2:\n self.a = coeffs_list[0]\n self.b = coeffs_list[1]\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = self.a\n self.a6 = self.b\n self.b2 = zero\n self.b4 = 2*self.a\n self.b6 = 4*self.b\n self.b8 = -(self.a**2)\n self.c4 = -48*self.a\n self.c6 = -864*self.b\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n\n self.ord = None\n self.abelian = None\n self.cubic = UniVarPolynomial({0:self.a6, 1:self.a4, 2:self.a2, 3:one},\n self.basefield)", "def populate_sidecar(self, **kwargs):\n\n # if it's an ecat it's Siemens\n self.sidecar_template['Manufacturer'] = 'Siemens'\n # Siemens model best guess\n self.sidecar_template['ManufacturersModelName'] = self.ecat_header.get('SERIAL_NUMBER', None)\n self.sidecar_template['TracerRadionuclide'] = self.ecat_header.get('ISOTOPE_NAME', None)\n self.sidecar_template['PharmaceuticalName'] = self.ecat_header.get('RADIOPHARAMCEUTICAL', None)\n\n # collect frame time start and populate various subheader fields\n for subheader in self.subheaders:\n 
self.sidecar_template['DecayCorrectionFactor'].append(subheader.get('DECAY_CORR_FCTR', None))\n self.sidecar_template['FrameTimesStart'].append(subheader.get('FRAME_START_TIME', None))\n self.sidecar_template['FrameDuration'].append(subheader.get('FRAME_DURATION', None))\n self.sidecar_template['ScaleFactor'].append(subheader.get('SCALE_FACTOR', None))\n\n # note some of these values won't be in the subheaders for the standard matrix image\n # need to make sure to clean up arrays and fields filled w/ none during pruning\n self.sidecar_template['ScatterFraction'].append(subheader.get('SCATTER_FRACTION', None))\n self.sidecar_template['PromptRate'].append(subheader.get('PROMPT_RATE', None))\n self.sidecar_template['RandomRate'].append(subheader.get('RANDOM_RATE', None))\n self.sidecar_template['SinglesRate'].append(subheader.get('SINGLES_RATE', None))\n\n # collect possible reconstruction method from subheader\n recon_method = helper_functions.get_recon_method(self.subheaders[0].get('ANNOTATION'))\n if recon_method:\n self.sidecar_template.update(**recon_method)\n\n # collect and convert start times for acquisition/time zero?\n scan_start_time = self.ecat_header.get('SCAN_START_TIME', None)\n\n if scan_start_time:\n scan_start_time = parse_this_date(scan_start_time)\n self.sidecar_template['AcquisitionTime'] = scan_start_time\n self.sidecar_template['ScanStart'] = scan_start_time\n\n # collect dose start time\n dose_start_time = self.ecat_header.get('DOSE_START_TIME', None)\n if dose_start_time:\n parsed_dose_time = parse_this_date(dose_start_time)\n self.sidecar_template['PharmaceuticalDoseTime'] = parsed_dose_time\n\n # if decay correction exists mark decay correction boolean as true\n if len(self.decay_factors) > 0:\n self.sidecar_template['ImageDecayCorrected'] = \"true\"\n\n # calculate scaling factor\n sca = self.data.max() / 32767\n\n self.sidecar_template['DoseCalibrationFactor'] = sca * self.ecat_header.get('ECAT_CALIBRATION_FACTOR')\n self.sidecar_template['Filename'] = os.path.basename(self.nifti_file)\n self.sidecar_template['ImageSize'] = [\n self.subheaders[0]['X_DIMENSION'],\n self.subheaders[0]['Y_DIMENSION'],\n self.subheaders[0]['Z_DIMENSION'],\n self.ecat_header['NUM_FRAMES']\n ]\n\n self.sidecar_template['PixelDimensions'] = [\n self.subheaders[0]['X_PIXEL_SIZE'] * 10,\n self.subheaders[0]['Y_PIXEL_SIZE'] * 10,\n self.subheaders[0]['Z_PIXEL_SIZE'] * 10\n ]\n\n # add tag for conversion software\n self.sidecar_template['ConversionSoftware'] = 'pypet2bids'\n self.sidecar_template['ConversionSoftwareVersion'] = helper_functions.get_version()\n\n\n\n # include any additional values\n if kwargs:\n self.sidecar_template.update(**kwargs)\n\n if not self.sidecar_template.get('TimeZero', None):\n if not self.sidecar_template.get('AcquisitionTime', None):\n logger.warn(f\"Unable to determine TimeZero for {self.ecat_file}, you need will need to provide this\"\n f\" for a valid BIDS sidecar.\")\n else:\n self.sidecar_template['TimeZero'] = self.sidecar_template['AcquisitionTime']\n\n # lastly infer radio data if we have it\n meta_radio_inputs = dcm2niix4pet.check_meta_radio_inputs(self.sidecar_template)\n self.sidecar_template.update(**meta_radio_inputs)\n\n # clear any nulls from json sidecar and replace with none's\n self.sidecar_template = helper_functions.replace_nones(self.sidecar_template)", "def _MoeLayerParams(ff_p):\n assert issubclass(ff_p.cls,\n layers_with_attention.TransformerFeedForwardLayer)\n assert p.num_experts > 0\n moe_p = p.moe_layer_tpl.Copy()\n # Copy over the 
base params.\n base_layer.BaseLayer.CopyBaseParams(ff_p, moe_p)\n # Set other params.\n moe_p.name = ff_p.name\n moe_p.input_dim = ff_p.input_dim\n moe_p.output_dim = ff_p.output_dim\n moe_p.hidden_dim = ff_p.hidden_dim\n moe_p.activation = ff_p.activation\n moe_p.residual_dropout_prob = ff_p.residual_dropout_prob\n moe_p.relu_dropout_prob = ff_p.relu_dropout_prob\n moe_p.dropout_tpl = ff_p.residual_dropout_tpl.Copy()\n moe_p.num_groups = p.num_groups\n moe_p.min_group_size = p.min_group_size\n moe_p.num_experts = p.num_experts\n # weight_split_dims_mapping and activation_split_dims_mapping should have\n # been set through p.moe_layer_tpl params.\n return moe_p", "def overlay(self):\n # retrieve header for photometry keywords\n # from current frame only\n hdr_str = self.run('fits header', via='get')\n\n # read it in to a fits header\n phdr = fits.Header()\n hdr = phdr.fromstring(hdr_str, sep='\\n')\n\n try:\n srcposx = hdr['SRCPOSX'] + 1\n srcposy = hdr['SRCPOSY'] + 1\n s1 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=blue tag={{srcpos}} '\\\n 'text=SRCPOS'.format(srcposx, srcposy)\n self.run('regions', s1)\n except (KeyError, ValueError):\n pass\n try:\n stcentx = hdr['STCENTX'] + 1\n stcenty = hdr['STCENTY'] + 1\n photaper = hdr['PHOTAPER']\n photskap = [float(x) for x in hdr['PHOTSKAP'].split(',')]\n s1 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=cyan tag={{srcpos}}'.format(stcentx, stcenty)\n self.run('regions', s1)\n s2 = 'circle({:f} {:f} {:f}) # ' \\\n 'color=cyan tag={{srcpos}}'.format(\n stcentx, stcenty, photaper)\n self.run('regions', s2)\n s3 = 'annulus({:f} {:f} {:f} {:f}) # ' \\\n 'color=cyan tag={{srcpos}} text=STCENT'.format(\n stcentx, stcenty, photskap[0], photskap[1])\n self.run('regions', s3)\n except (KeyError, ValueError):\n pass\n try:\n stcentx = hdr['STCENTX'] + 1\n stcenty = hdr['STCENTY'] + 1\n flux = hdr['STAPFLX']\n sky = hdr['STAPSKY']\n s1 = 'text({:f} {:f}) # color=cyan ' \\\n 'text=\"Flux={:.2f}, Sky={:.2f}\"'.format(\n stcentx, stcenty - 40, flux, sky)\n self.run('regions', s1)\n except (KeyError, ValueError):\n pass\n\n # try overlaying apertures as well\n try:\n self.overlay_aperture(hdr)\n except ValueError: # pragma: no cover\n # may be encountered with extensions with\n # unexpected WCSs\n pass", "def __init__(self,image_name,sym_pred_list,latex = 'LATEX_REPR'):\n self.image_name = image_name\n self.latex = latex\n self.sym_pred_list = sym_pred_list", "def generate_headerfile(template, n_division=10000, df=6, start_chi=25, filepath=\"Chi2PLookup.h\", verbose=False):\n divisor = \"const int Chi2PLookup::divisor = {};\".format(n_division)\n\n names = []\n cutoff = []\n p_values_arrays = []\n degrees_of_freedom = range(1, df+1)\n\n if verbose:\n print(\"Generating p-value arrays...\")\n print(\" df={}\".format(df))\n print(\" precision={}\".format(n_division))\n\n for df in degrees_of_freedom:\n var_name = \"pValues_{}\".format(df)\n names.append(var_name)\n max_chi = max_chi_value(df=df, start_chi=start_chi)\n cutoff.append(max_chi)\n n_elements = max_chi * n_division\n\n chi_values = (val / n_division for val in range(0, n_elements + 1))\n p_values = (str(1 - chi2.cdf(val, df)) for val in chi_values)\n\n if verbose:\n print(\"\\tAdding p-values array to template for degree of freedom = {} ...\".format(df))\n\n p_values_arrays.append(\"double {}[] = {{{}}};\".format(var_name, \", \".join(p_values)))\n\n cutoff_array = \"const int Chi2PLookup::cutoff[] = {{{}}};\".format(\", \".join([str(i) for i in cutoff]))\n p_values_array_of_arrays = 
\"const double * Chi2PLookup::pValues[] = {{{}}};\\n\".format(\", \".join(names))\n\n template = template.format(divisor, cutoff_array, \"\\n\".join(p_values_arrays), p_values_array_of_arrays)\n\n if verbose:\n print(\"Saving file to: {}\".format(os.path.abspath(filepath)))\n\n with open(filepath, \"w\") as outfile:\n outfile.write(template)\n\n return template", "def pibooth_setup_picture_factory(cfg, opt_index, factory):", "def _augment_info(self, header):\n # Information on carriers\n header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AFFECTED_CARRIERS\"),\n (\"Number\", \"1\"),\n (\"Type\", \"Integer\"),\n (\"Description\", \"Number of affected samples from pedigree that are carriers\"),\n ]\n )\n )\n header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"UNAFFECTED_CARRIERS\"),\n (\"Number\", \"1\"),\n (\"Type\", \"Integer\"),\n (\"Description\", \"Number of unaffected samples from pedigree that are carriers\"),\n ]\n )\n )\n header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"BACKGROUND_CARRIERS\"),\n (\"Number\", \"1\"),\n (\"Type\", \"Integer\"),\n (\"Description\", \"Number of background samples that are carriers\"),\n ]\n )\n )\n for anno_args in self.args.annotation_beds:\n header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", anno_args[\"info\"]),\n (\"Number\", \".\"),\n (\"Type\", \"String\"),\n (\"Description\", anno_args[\"description\"]),\n ]\n )\n )\n return header", "def refine_product_headers(product, total_obj_list):\n hdu, closefits = _process_input(product)\n phdu = hdu[0].header\n # Insure rootname and filename keywords matches actual filename\n phdu['rootname'] = '_'.join(product.split('_')[:-1])\n phdu['filename'] = product\n\n # Determine level of the product\n level = 1 if len(phdu['rootname'].split('_')[-1]) > 6 else 2\n\n # Update PINAME keyword\n phdu['piname'] = phdu['pr_inv_l']\n\n # Start by updating the S_REGION keyword.\n compute_sregion(hdu)\n\n # Compute numexp as number of exposures NOT chips\n input_exposures = list(set([kw[1].split('[')[0] for kw in phdu['d*data'].items()]))\n if level == 1:\n ipppssoots = [fname.split('_')[0] for fname in input_exposures]\n phdu['ipppssoo'] = ';'.join(ipppssoots)\n phdu['numexp'] = len(input_exposures)\n\n # Convert dates to ISO format\n phdu['date-beg'] = (Time(phdu['expstart'], format='mjd').iso, \"Starting Date and Time\")\n phdu['date-end'] = (Time(phdu['expend'], format='mjd').iso, \"Ending Date and Time\")\n\n phdu['equinox'] = hdu[('sci', 1)].header['equinox'] if 'equinox' in hdu[('sci', 1)].header else 2000.0\n\n # Re-format ACS filter specification\n if phdu['instrume'] == 'ACS':\n phdu['filter'] = get_acs_filters(hdu, delimiter=';')\n\n # Insure PHOT* keywords are always in SCI extension\n for pkw in PHOT_KEYWORDS:\n if pkw in phdu:\n hdu[('sci', 1)].header[pkw] = (phdu[pkw], phdu.cards[pkw].comment)\n del phdu[pkw]\n\n # Apply any additional inputs to drizzle product header\n if level:\n hdu[0].header['haplevel'] = (level, \"Classification level of this product\")\n\n # Reset filter specification for total detection images which combine filters\n if 'total' in phdu['rootname']:\n phdu['filter'] = 'detection'\n\n # Build HAP table\n # if 'total' in product: level = 3\n update_hdrtab(hdu, level, total_obj_list, input_exposures)\n\n # close file if opened by this function\n if closefits:\n hdu.close()", "def test_header_update3(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"octr11hrq_raw.fits\")\n self.get_data(\"input\", \"octr11hrq_spt.fits\")\n\n 
capsys.readouterr()\n\n tastis('octr11hrq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"octr11hrq HST/STIS G430M 31X0.05NDA ACQ/PEAK-UP\\n\" \\\n \"prop: 14341 visit: 11 line: 9 target: HD128621-2\\n\" \\\n \"obs date, time: 2016-08-28 22:33:14 exposure time: 0.10\\n\" \\\n \"dom GS/FGS: S7QX000303F1 sub-dom GS/FGS: S7QX000751F2\\n\" \\\n \"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(1022,32) corner=(25,500)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Scan type: LINEARAXIS1 Step size (mas): 39\\n\" \\\n \"\\n\" \\\n \" [5478 0 798 3264 4796 1923 4876]\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 0.2 0.0 0.010 0.000 0.007 0.007\\n\" \\\n \"Flux in post-slew confirmation image (882661) - Pedestal (871184) = 11477 DN\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The flux in the confirmation image is 110% greater than the maximum flux\\n\" \\\n \"in the ACQ/PEAK scan. An excess greater than 100% indicates\\n\" \\\n \"problems in the ACQ/PEAK.\\n\" \\\n \"\\n\" \\\n \"The flux in the confirmation image is 57% of the recommended minimum\\n\" \\\n \"of 20000 DN for a dispersed-light ACQ/PEAK. The signal-to-noise in\\n\" \\\n \"the ACQ/PEAK may be inadequate for an accurate centering.\\n\" \\\n \"\\n\" \\\n \"The maximum flux in the sequence occurred at one end.\\n\" \\\n \"This may indicate that the target was beyond that end\\n\" \\\n \"or that a neighboring object affected the acquisition.\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"octr11hrq_raw.fits\", \"octr11hrq_raw_ref.fits\")]\n self.compare_outputs(outputs)", "def preparehspiceidvgGEO1v2(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,HFINparam,TFIN_TOPparam,TFIN_BASEparam,EOTparam,NBODYparam,NFINparam,PHIGparam,RSHSparam,RSHDparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'HFINparam',HFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_TOPparam', TFIN_TOPparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_BASEparam',TFIN_BASEparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'EOTparam', EOTparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 
'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'PHIGparam', PHIGparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'RSHSparam', RSHSparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'RSHDparam', RSHDparam)", "def process_nd2(self):\n if self.parameters['filetype'] == 'tif':\n self.stack = pims.ImageSequenceND(self.parameters['filename'], axes_identifiers=['c','z'])\n self.parameters['channels'] = [f\"channel {i}\" for i in range(self.stack.sizes['c'])]\n else:\n self.stack = pims.open(self.parameters['filename'])\n self.stack.default_coords['t'] = 0\n self.parameters['channels'] = self.stack[0].metadata['channels']\n self.parameters['pixel_microns'] = self.stack[0].metadata['pixel_microns']\n if \"v\" in self.stack.sizes:\n self.stack.bundle_axes = 'vyx'\n self.has_multiple_series = True\n else:\n self.stack.bundle_axes = 'yx'\n self.has_multiple_series = False\n self.stack.iter_axes = 'z'\n tvMeta = ttk.Treeview(self.window)\n tvMeta['columns'] = (\"metaval\")\n tvMeta.column(\"#0\", width=250)\n tvMeta.column(\"metaval\", minwidth=250)\n tvMeta.heading(\"#0\", text=\"Key\", anchor=tk.W)\n tvMeta.heading(\"metaval\", text=\"Value\", anchor=tk.W)\n for metakey, metaval in self.stack[0].metadata.items():\n if not metaval:\n metaval = '' # replace attributes that can't be parsed with an empty string\n tvMeta.insert('', \"end\", text=metakey, values=(metaval))\n tvMeta.pack(side=tk.TOP, fill=tk.BOTH,expand=True)\n self.widgets['btnNext'] = tk.Button(self.window, text=\"Select channel >\", command=self.open_channelselector)\n self.widgets['btnNext'].pack(side=tk.TOP)", "def __updateMatC(self):\n\t\tif self.regScheme == 2:\n\t\t\tfor id1 in range(self.nTemplates):\n\t\t\t\tself.C[id1,id1] = 1.0 / self.w0[id1]**2", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * 
Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def odemis_to_hyperspy(filename='sampledata/cltest.h5',specbin=1) :\r\n\r\n f=h5.File(filename,'r')\r\n shome = 'Acquisition2//ImageData/'\r\n x = f[shome + 'Image']\r\n cdesc =f['Acquisition2/PhysicalData/ChannelDescription'].value[0].decode('utf-8')\r\n #print(cdesc)\r\n\r\n cltype = None\r\n if 'Spectrum' in cdesc :\r\n cltype = 'spectrum'\r\n elif 'CL intensity' in cdesc:\r\n cltype = 'panchrom'\r\n\r\n print('<' + filename + '> original shape :' ,x.shape, cltype)\r\n\r\n # strip unused dimensions and transpose/ reverse index order\r\n if cltype == 'panchrom' :\r\n xx=x[0,0,0,:,:].transpose((1,0))\r\n # just an image..\r\n else :\r\n xx=x[:,0,0,:,:].transpose((2,1,0))\r\n\r\n if cltype == 'spectrum' :\r\n #interpolate data to linearize the wavelength scale\r\n w = f[shome + 'DimensionScaleC'].value *1e9\r\n wx = np.linspace(w.min(),w.max(),w.size)\r\n for i in np.arange(xx.shape[0]) :\r\n for k in np.arange(xx.shape[1]) :\r\n xx[i,k,:] = np.interp(wx,w,xx[i,k,:])\r\n\r\n wslope = wx[1]-wx[0]\r\n woffset = wx.min()\r\n #wx = np.arange(w.size)\r\n #wslope,woffset=np.polyfit(wx,w,1)\r\n s = hs.signals.Signal1D(xx)\r\n\r\n elif cltype == 'panchrom' :\r\n s = hs.signals.Signal2D(xx)\r\n else :\r\n print('unknown type')\r\n\r\n print('hyperspy shape :' ,s.data.shape)\r\n\r\n\r\n s.metadata.General.title = 'Odemis: ' + cdesc\r\n s.metadata.General.original_filename = filename\r\n s.metadata.General.notes = cltype\r\n s.axes_manager[0].name = 'pos x'\r\n s.axes_manager[0].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[0].offset = f[shome + 'XOffset'].value * 1e6\r\n s.axes_manager[0].units = 'um'\r\n\r\n\r\n s.axes_manager[1].name = 'pos y'\r\n s.axes_manager[1].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[1].offset = f[shome + 'YOffset'].value * 1e6\r\n s.axes_manager[1].units = 'um'\r\n\r\n if cltype == 'spectrum' :\r\n s.axes_manager[2].name = 'wavelength'\r\n s.axes_manager[2].units = 'nm'\r\n s.axes_manager[2].offset = woffset\r\n s.axes_manager[2].scale = wslope\r\n s.metadata.signal_type = 'CL'\r\n\r\n f.close()\r\n if (specbin > 1) and (cltype == 'spectrum'):\r\n return( s.rebin(scale=[1,1,specbin]) )\r\n else :\r\n return( s )\r\n #end odemis_to_hyperspy\r\n #######################\r", "def make_param_card(self, param_card):\n ## \\todo explain what param card is\n logger.debug(\"Making param card '%s'\" % param_card)\n\n with open(param_card, 'r') as paramin:\n data = paramin.readlines()\n\n for i in range(0, len(data)):\n if \"APMASS\" in data[i] and self.apmass is not None:\n data[i] = \" 622 %.7fe-03 # APMASS\" % (self.apmass) + '\\n'\n logger.debug(\"APMASS in param card set to %d\" % self.apmass)\n if \"map\" in data[i] and self.map is not None:\n data[i] = \" 622 %.7fe-03 # map\" % (self.map) + '\\n'\n if \"mpid\" in data[i] and self.mpid is not None:\n data[i] = \" 624 %.7fe-03 # mpid\" % (self.mpid) + '\\n'\n if \"mrhod\" in data[i] and self.mrhod is not None:\n data[i] = \" 625 %.7fe-03 # mrhod\" % (self.mrhod) + '\\n'\n\n with open(param_card, 'w') as paramout:\n paramout.writelines(data)", "def preparehspiceidvgGEO4(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,Ach_UFCMparam,Cins_UFCMparam,W_UFCMparam,NBODYparam,NFINparam):\n#L=Lparam Ach_UFCM=Ach_UFCMparam Cins_UFCM=Cins_UFCMparam W_UFCM=W_UFCMparam NBODY=NBODYparam NFIN=NFINparam\n #make an aux copy of hspice file to 
simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Ach_UFCMparam',Ach_UFCMparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Cins_UFCMparam', Cins_UFCMparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'W_UFCMparam',W_UFCMparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam)", "def giveMotevoParamFile(genome, wmlen, inter_dir, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior, bgorder, bgprior):\n\n ##UFE_models from genome_dict are not used anymore\n #UFEmodel_hg19 is UFE model for mammal species\n genome_dict = {}\n genome_dict['hg19'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau6:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_hg19']\n genome_dict['hg18'] = ['((((hg18:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau3:0.186713,(equCab1:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom4:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFE_mammals']\n #genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_dm3']\n genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/dm3UFEparallel/UFEmodel_dm3']\n genome_dict['mm9'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau7:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_mm9']\n\n\n sitefilepath = os.path.join(inter_dir, 'sites_' + tag)\n priorfilepath = os.path.join(inter_dir, 'priors_' + tag)\n loglikfile = os.path.join(inter_dir, 'loglik_' + tag)\n\n\n print '\\nCreate motevo parameter file %s' %tag\n print 'aligned', aligned\n if aligned:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE %s' %genome_dict[genome][0],\n 'Mode 
TFBS',\n 'EMprior %s' %emprior,\n 'priordiff %s' %0.05,\n 'UFEwmprior %s' %200,\n 'UFEwmfile %s' %ufemodel_path,\n 'UFEwmlen %s' %wmlen,\n 'UFEprint %s' %0,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile])\n else:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE (%s: 1)' %genome,\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 'priordiff %s' %0.05,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile]) \n\n params_path = os.path.join(inter_dir, 'motevo_TFBS_params_' + tag)\n pf = open(params_path, 'w')\n pf.write(motevo_params)\n return (params_path, sitefilepath, priorfilepath, loglikfile)", "def __init__(self, train, transform, data_path='../extracted', category_path=\"../all_fish.txt\"):\n self.transform = transform\n data_path = os.path.join(data_path, 'train' if train else 'test')\n fp = open(category_path, 'r')\n self.fish_dict = {i.split(\";\")[0] : i.split(\";\")[1][1:-1] for i in fp}\n \n #self.name_to_label = [i.split(\";\")[1][1:-1] for i in fp]\n self.image_paths = glob.glob(data_path + '/*.jpg')", "def odemisSEM_to_hyperspy(filename='sampledata/cltest.h5') :\r\n\r\n f=h5.File(filename,'r')\r\n shome = 'Acquisition1//ImageData/'\r\n x = f[shome + 'Image']\r\n cdesc =f['Acquisition1/PhysicalData/ChannelDescription'].value[0].decode('utf-8')\r\n #print(cdesc)\r\n\r\n\r\n print('<' + filename + '> original shape :' ,x.shape)\r\n # strip unused dimensions and transpose/ reverse index order\r\n xx=x[0,0,0,:,:].transpose((1,0))\r\n\r\n s = hs.signals.Signal2D(xx)\r\n print('hyperspy shape :' ,s.data.shape)\r\n\r\n\r\n s.metadata.General.title = 'Odemis: ' + cdesc\r\n s.metadata.General.original_filename = filename\r\n #s.metadata.General.notes = cltype\r\n s.axes_manager[0].name = 'pos x'\r\n s.axes_manager[0].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[0].offset = f[shome + 'XOffset'].value * 1e6\r\n s.axes_manager[0].units = 'um'\r\n\r\n\r\n s.axes_manager[1].name = 'pos y'\r\n s.axes_manager[1].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[1].offset = f[shome + 'YOffset'].value * 1e6\r\n s.axes_manager[1].units = 'um'\r\n\r\n\r\n f.close()\r\n return( s )\r\n #end odemisSEM_to_hyperspy\r\n #######################\r", "def test_crtf_header():\n crtf_str = ('#CRTFv0 CASA Region Text Format version 0\\n'\n 'circle[[42deg, 43deg], 3deg], coord=J2000, color=green')\n reg = Regions.parse(crtf_str, format='crtf')[0]\n assert isinstance(reg, CircleSkyRegion)\n assert reg.center.ra.value == 42.0\n assert reg.center.ra.unit == 'deg'\n assert reg.center.dec.value == 43.0\n assert reg.center.dec.unit == 'deg'\n assert reg.radius.value == 3.0\n assert reg.radius.unit == 'deg'" ]
[ "0.6119822", "0.52715033", "0.52555937", "0.521904", "0.5153852", "0.5145439", "0.50969875", "0.5046172", "0.5035334", "0.5006989", "0.49636608", "0.49189067", "0.49184573", "0.4891116", "0.4880313", "0.4876546", "0.48572096", "0.4852319", "0.48459044", "0.4828242", "0.48211944", "0.4816301", "0.4812694", "0.4803674", "0.47828144", "0.4772682", "0.4767819", "0.47572675", "0.4753877", "0.47485572", "0.4744925", "0.47420624", "0.4741413", "0.4722203", "0.47129938", "0.47110823", "0.4704231", "0.47035655", "0.47026095", "0.46992204", "0.46922207", "0.46914074", "0.46766132", "0.46655464", "0.46637776", "0.46600464", "0.46589407", "0.46488172", "0.46340117", "0.46291155", "0.46288937", "0.4628122", "0.4627312", "0.46252075", "0.46171197", "0.46121395", "0.46030474", "0.459885", "0.45967022", "0.45939752", "0.45915076", "0.45894653", "0.45872048", "0.4583935", "0.4578405", "0.45716244", "0.45698202", "0.45646128", "0.45644155", "0.4555637", "0.4552494", "0.45517385", "0.4546868", "0.45433304", "0.45425668", "0.4540414", "0.45353794", "0.45339152", "0.45308337", "0.45259523", "0.45165974", "0.45130324", "0.45065406", "0.45016265", "0.45009574", "0.44980493", "0.4495017", "0.4494322", "0.44935718", "0.44914007", "0.44906485", "0.4489022", "0.44878814", "0.44873703", "0.44844535", "0.4484387", "0.4483582", "0.44803724", "0.4477898", "0.44752023" ]
0.60989493
1
Find all occurrences of val on list lo. Returns a list of indices of val on lo.
def findall(lo,val):\n\tu = []\n\ti = -1\n\twhile( i < len(lo)-1):\n\t\ttry:\n\t\t\ti = lo.index(val,i+1)\n\t\t\tu.append(i)\n\t\texcept:\n\t\t\ti += 1\n\treturn u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n if len(valNdx) == 0:\n start = 0\n end = 0\n else:\n # The index into counts, etc. for this value. \n valNdx = valNdx[0]\n start = self.start[valNdx]\n end = self.end[valNdx]\n \n # Create a tuple of index arrays, one for each index of the original array. \n ndx = ()\n for i in range(self.nDims):\n ndx += (self.indexes[start:end, i], )\n return ndx", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def all_indices(haystack, needle):\n index = 0\n indices = list()\n while True:\n try:\n i = haystack.index(needle, index)\n except ValueError:\n break\n indices.append(i)\n index = i+1\n return indices", "def get_indexes(from_list, find_list):\n\n df_find = pd.DataFrame(find_list, columns=['value'])\n df_from = pd.DataFrame(list(zip(from_list, np.arange(len(from_list)))), columns=['value', 'index'])\n indexes = pd.merge(df_from, df_find, on='value', how='inner')['index'].values\n return indexes", "def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def getAllIndex(ldata, fldata):\n\treturn list(map(lambda e : fldata.index(e), ldata))", "def map_values_to_value_list(value_list, values):\n return [value_list.index(x) for x in values]", "def get_coincidence_indices(self, lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)", "def indexof(self, value, tag=WORD):\n match = lambda a, b: a.endswith(\"*\") and b.startswith(a[:-1]) or a==b\n indices = []\n for i in range(len(self.words)):\n if match(value, unicode(self.get(i, tag))):\n indices.append(i)\n return indices", "def indexAll(inputList=None, value=None):\r\n if not isinstance(inputList, list):\r\n raise TypeError('Input list must be a list object.')\r\n return [i for i, x in enumerate(inputList) if x == value]", "def find_index(vec_vals,target):\n target=np.atleast_1d(target) #turn scalar into iterable, no op if already array\n vec_vals=np.array(vec_vals)\n index_list=[]\n for item in target:\n first_index=np.argmin(np.abs(vec_vals - item))\n index_list.append(first_index)\n return index_list", "def findIndices(g):\r\n change = [0]\r\n seen = [g[0]]\r\n for i in range(1, len(g)):\r\n if not g[i] in seen:\r\n change.append(i)\r\n seen.append(g[i])\r\n return change", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def linearSearch(values: list, target: int) -> int:\n for i in range(len(values)):\n if target == values[i]:\n return i\n \n return -1", "def coord_indices_of(self, v_list):\n return [self.coord_index_of(v) for v in v_list]", "def find_at(self, x, y):\n return list(self.ifind_at(x, y))", "def lc_index(*args):\n index = []\n x = check_lc_data(args[0])\n i = 0\n for line in args[0].Data.LCData.lc_data:\n i += 1\n if line != x[i - 1]:\n 
index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def int_to_indices(value: int, length: int, radix_bits: int) -> Iterable[int]:\n mask = (1 << radix_bits) - 1\n return ((value >> (i * radix_bits)) & mask for i in reversed(range(length)))", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def positions(self, searchstr: str):\n indices = []\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index >= 0:\n indices.append(index)\n return indices", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def index(self,*val):\n if len(val): self._value = self.allele_set[val[0]]\n return self.allele_set.index(self.value())", "def get_indexes(self, x):\n indexes = []\n for index_hashes in self.hash_functions:\n combined_index = []\n for idx_spec, hash_func in zip(self.config.index_specs, index_hashes):\n combined_index.append(idx_spec.distribution.get_index(hash_func(x)))\n indexes.append(tuple(combined_index))\n return indexes", "def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos", "def return_inds(arr, target):\n\n # Convert list to numpy array\n arr = np.array(arr)\n # Determine all possible combinations, excluding combinations of the same number\n arr_combs = list(combinations(arr, 2))\n \n # Determine the sum of each combination\n sum_arr = np.array(list((map(sum, arr_combs)))) \n \n # Determine the index where the sum is equal to our target\n vals = arr_combs[np.where(sum_arr == target)[0][0]]\n \n # Determine the two indices\n ind_1 = np.where(arr == vals[0])[0][0]\n ind_2 = np.where(arr == vals[1])[0][0]\n\n return ind_1, ind_2", "def return_indices(nums, target):\n indices = []\n i = 0\n number_found = False\n while not number_found:\n my_target = nums[i]\n \n for j in range(i+1,len(nums)):\n my_target += nums[j]\n if my_target == target:\n number_found = True\n indices = [i, j]\n break\n my_target = nums[i]\n \n i+=1\n return indices", "def get_row_indices(df, col, vals):\n\n return list(df[df[col].isin(vals)].index)", "def get_positions(token, docs):\n\n all_matches = [token]\n for doc in docs:\n matches = []\n if token in doc:\n indexes = [i for i, x in enumerate(doc) if x == token]\n matches += [docs.index(doc), len(indexes), indexes]\n if matches:\n all_matches.append(matches)\n return all_matches", "def scan(self) -> list[int]:", "def find_needle_in_haystack(self, needle, haystack): \n r = [] \n L = len(needle) \n for i in range(len(haystack)): \n if haystack[i:i+L] == needle: \n r.append(i)\n return r", "def search_linear(xs, target):\n for (i, v) in enumerate(xs):\n if v == target: # Is referred to as a probe.\n return i\n return -1", "def __getHints(self, p):\n st = bisect.bisect_left(self.index, (p[:self.ln], -1)) # binary search\n en = bisect.bisect_right(self.index, (p[:self.ln], sys.maxsize)) # binary search\n hits = self.index[st:en] # this range of elements corresponds to the hits\n return [h[1] for h in hits] # return just the offsets", "def scan(self) -> List[int]:", "def scan(self) -> List[int]:", "def positions(self, searchstr: str):\n out = []\n for x in range(0, len(self.sa)):\n 
sub = self.sa[x]\n if searchstr == sub[0:len(searchstr)]:\n out.append(x)\n return out\n \n pass", "def linear_search(lst, value):\n i = 0\n while i != len(lst) and lst[i] != value:\n i = i + 1\n if i == len(lst):\n return -1\n else:\n return i", "def indexer(list1, list2):\r\n\tindex_list = []\r\n\tfor x in list2:\r\n\t\tfor y in list1:\r\n\t\t\tif x == y:\r\n\t\t\t\tindex = list1.index(x)\r\n\t\t\t\tindex_list.append(index)\r\n\treturn index_list", "def get_indexes_of(number, int_list):\n\n index = 0\n result = []\n while True:\n if is_end_of_list(int_list, index):\n break\n if number in int_list[index:]: # if number is found in (the rest of) the int_list\n result.append(index + int_list[index:].index(number)) # result = [3]\n index = result[-1] + 1 # index = 4\n continue\n else: # cannot find the number in (the rest of) the int_list\n break\n return result # [3,7]", "def get_indexes(self, variable, *args):\n\n return [get_subset_idxs(data, min, max)\n for data, (min, max) in args]", "def occurence(main_seq,sub_seq):\n start= 0\n indices =[]\n while True:\n start = main_seq.find(sub_seq,start)\n if start > 0:\n indices.append(start)\n else:\n break\n start +=1\n return indices", "def getall(l, idx):\n return [l[i] for i in idx]", "def indices_of(self, col_name, value):\n return list(self._obj[self._obj[col_name] == value].index\n ) if col_name in self._obj.columns else None", "def get_indexes(self):\n return set(k.index for k in self if k.has_index)", "def indices(self, fit):\r\n lam = self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20\r\n reev = int(lam) + ((lam % 1) > np.random.rand())\r\n return np.argsort(array(fit, copy=False)[:2 * (reev + 1)])[:reev]", "def findings_2_idx(findings, corner_2_idx, funcx, funcy):\n idx = []\n for finding in findings:\n x, y = finding\n mesh = np.array(np.meshgrid(funcx(x), funcy(y))).swapaxes(1,2).reshape(2,-1).T\n idx.extend([corner_2_idx(c) for c in mesh])\n\n return np.unique(idx)", "def getIndicesGlobCurrent(lons, lats):\n if np.size(lons) == 1:\n lon_0, lon_1 = int(np.floor(lons-5)), int(np.ceil(lons+5))\n else:\n lon_0, lon_1 = int(np.round(np.min(lons))), int(np.round(np.max(lons)))\n\n if np.size(lats) == 1:\n lat_0, lat_1 = int(np.floor(lats-5)), int(np.ceil(lats+5))\n else:\n lat_0, lat_1 = int(np.round(np.min(lats))), int(np.round(np.max(lats)))\n\n lon_range = range((lon_0-5+180)*4-1, (lon_1+5+180)*4+1)\n lat_range = range((lat_0-5+80)*4-1, (lat_1+5+80)*4+1)\n\n indices = {\"lon\": lon_range,\n \"lat\": lat_range}\n\n print \"getIndicesGlobCurrent(): Success! 
Indices created.\"\n return indices", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_lis(a):\n dp = [0]*len(a)\n aux = [inf]*(len(a)+1)\n aux[0] = -inf\n high = 0\n for i in range(len(a)):\n dp[i] = bisect_left(aux, a[i])\n aux[dp[i]] = min(aux[dp[i]], a[i])\n high = max(high, dp[i])\n return high", "def get_possible_indexes(self, factors_lock, random_state):\n factor_list = [[]]\n for i, lock_val in enumerate(factors_lock):\n ## if unlocked, number of possible factor-samples are multiplied by the number of possible unlocked factor values\n if lock_val == -1 : \n base = factor_list\n factor_list = []\n for b in base:\n bc = b.copy()\n for f in range(self.factor_sizes[i]):\n bc.append(f)\n factor_list.append(bc)\n bc = b.copy()\n \n else: ## if factor is locked, just append the locked factor value\n for i in range(len(factor_list)):\n factor_list[i].append(lock_val)\n #transform to index\n possible_indexes = self._features_to_state_space_index(np.asarray(factor_list)) \n return possible_indexes", "def FindIdxValues(X):\n data = X.select_dtypes(include=[\"float64\"])\n idx = np.argwhere(~np.isnan(data.values))\n idx[:, 1] += 4 # add ID variable columns\n StoE = pd.read_csv(\"msresist/data/MS/CPTAC/IDtoExperiment.csv\")\n assert all(StoE.iloc[:, 0] == data.columns), \"Sample labels don't match.\"\n StoE = StoE.iloc[:, 1].values\n tmt = [[StoE[idx[ii][1] - 4]] for ii in range(idx.shape[0])]\n return np.append(idx, tmt, axis=1)", "def list_item_indexes(list_arg: list, item: Any) -> Tuple[int, ...]:\n indexes = [index for index, value in enumerate(list_arg) if value == item]\n return indexes", "def get_os_indices_list(common_os_dict):\n\n indices_list = []\n os_values_list = common_os_dict.values()\n for os_entry in os_values_list:\n indices_list.append(os_entry)\n\n return indices_list", "def parameter_finder(target_list, search_list, msgflag=False, exact=False):\n target_list = [x.lower() for x in target_list]\n\n indexes = []\n\n if isinstance(search_list, str):\n cont = 0\n search_list = search_list.lower()\n for t in target_list:\n if exact == False and search_list in t:\n indexes.append(cont)\n elif exact == True and search_list == t:\n indexes.append(cont)\n cont += 1\n if isinstance(search_list, list):\n search_list = [x.lower() for x in search_list]\n\n for s in search_list:\n s = str(s)\n for cont, t in enumerate(target_list):\n if exact == False and s in t:\n print((s, t))\n indexes.append(cont)\n elif exact == True and s == t:\n print((s, t))\n indexes.append(cont)\n\n if msgflag == True:\n length = len(indexes)\n if length > 1: print(\"There were several ocurrences\")\n if length == 0: print(\"No ocurrences found\")\n\n return indexes", "def __valuesToIndices(self, mappings, values):\n indices = np.empty(0, dtype=np.int_)\n\n for key, _ in mappings.items():\n # Lookup the index of the value of the values in the map.\n index = mappings[key](values[key])\n\n indices = np.hstack((indices, index))\n\n return indices", "def find_all_lists(rij):\n\n langste_rij = [0]*len(rij)\n langste_rij[0] = 1\n\n for i in range(len(rij)):\n for j in range(i):\n #print \"*******\", i, rij[i], j, langste_rij[i]\n if ((rij[j] < rij[i]) and (langste_rij[i] < langste_rij[j]+1)):\n langste_rij[i] = langste_rij[j] + 1\n\n return langste_rij", "def get_indexes(self, items: Iterable[_T]) -> List[int]:\n return [self.get_index(item) for item in 
items]", "def find_indices(li, first_elt, second_elt):\r\n index1, index2 = li.index(first_elt), li.index(second_elt)\r\n if index1 == index2:\r\n index2 = index1 + 1 + li[index1+1:].index(second_elt)\r\n if index1 > index2:\r\n index1, index2 = index2, index1\r\n return (index1+1, index2+1)", "def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def find_all_elements(grid, target):\n \n indices = []\n \n ### This pattern of iterating through row and col indices is very common\n for row_number in range(len(grid)):\n for col_number in range(len(grid[row_number])):\n \n if grid[row_number][col_number] == target:\n indices.append((row_number, col_number))\n \n return indices", "def fn(nums):\n ans, vals = [], []\n for i, x in enumerate(nums): \n k = bisect_left(vals, x)\n if k == len(vals): vals.append(x)\n else: vals[k] = x\n ans.append(k)\n return ans", "def arg_indices(concept, mentions, toks):\r\n indices = []\r\n for i, tok in enumerate(toks):\r\n for m in mentions:\r\n if 'start' not in m:\r\n logging.warning('%s', m)\r\n if m['id'] == concept and m['start'] <= tok['start'] and tok['end'] <= m['end']:\r\n indices.append(i)\r\n break\r\n return indices", "def index_two_v2(values):\n\n pairs = []\n for i in range(len(values)):\n pairs.append((values[i], i))\n pairs.sort()\n return pairs[0][1], pairs[1][1] # indices of the values are in location 1 of each pair", "def linear_search(vlist, srchval): # somewhat different from book\n#Look at each item in list. If it equals the value you are looking for, stop.\n # linear_search_2.py\n index = 0\n for item in vlist:\n if item == srchval:\n return index # implicit break\n index += 1\n \n return -1", "def get_idx_from_sent(sent, word_idx_map, max_l=50, filter_h=3):\n x = []\n x_mask = []\n pad = filter_h - 1\n for i in xrange(pad):\n x.append(0)\n x_mask.append(0)\n words = sent.split()\n for i, word in enumerate(words):\n if i >= max_l: break\n if word in word_idx_map:\n x.append(word_idx_map[word])\n x_mask.append(1)\n while len(x) < max_l+2*pad:\n x.append(0)\n x_mask.append(0)\n for e in x_mask:\n x.append(e)\n return x", "def min_indice(L):\n min_l = min(L)\n return [min_l,np.where(L==min_l)[0][0]]", "def find_indices(self,nr_frame,label,forward=True):\n if forward:\n index = nr_frame-self.first_frame\n label_list=[label]\n #Fetches the 10 first frames. 10 is arbitrary\n n_iterations = min(10,len(self.correspondance_lists)-index-1 )\n for i in range(n_iterations):\n corresp_list = self.correspondance_lists[index+i]\n match = [v for u,v in corresp_list if u==label_list[index+i]]\n match = match[0]\n if match==-1:\n break\n \n label_list.append(match)\n return label_list\n \n else:\n index = nr_frame-self.first_frame\n label_list=[label]\n #Fetches the 10 first frames. 
10 is arbitrary\n n_iterations = min(10,index )\n for i in range(n_iterations):\n corresp_list = self.correspondance_lists[index-i]\n match = [u for u,v in corresp_list if v==label_list[index-i]]\n match = match[0]\n if match==-1:\n break\n label_list.append(match)\n return label_list", "def get_pixel_indices(self, lats, lons):\n return self._hpx.get_pixel_indices(lats, lons)", "def index(liste, value):\n\n for ii in range(len(liste)):\n if liste[ii] == value:\n return ii\n return None", "def get_all_occurences(s, word):\n res = [0]\n print(\"Searching for \" + word)\n while True:\n try:\n idx = s.index(word, res[-1])\n res.append(idx+1)\n #print(\"found at \", idx)\n except ValueError:\n break\n return res[1:]", "def get_indexes_for_word (self,word):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,word,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM word_to_indexes\"\r\n +\" WHERE notebook=? and word=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n\r\n return self.word_dict[word]", "def linear_search(self, num_lst, key):\r\n # Running time: O(n)\r\n for i in range(len(num_lst)):\r\n if num_lst[i] == key:\r\n return i\r\n \r\n return -1", "def value_to_idx(val_range, unique_values, run_idx):\n return np.where(unique_values == val_range[run_idx])[0]", "def linear_search(element, list_of_elements):\n for i, elem in enumerate(list_of_elements):\n if elem == element:\n return i\n return None", "def indices_of_specie(self, specie: Union[int, str]) -> List[int]:\n return [i for i, spec in enumerate(self.coded_species)\n if spec == specie]", "def get_antecedent_constant_indexes(rule):\n constant_indexes = dict()\n for antecedent_atom in extract_logic_predicates(rule.antecedent):\n predicate = antecedent_atom.functor.name\n indexes = {\n i\n for i, arg in enumerate(antecedent_atom.args)\n if isinstance(arg, Constant)\n }\n if len(indexes) > 0:\n constant_indexes[predicate] = indexes\n return constant_indexes", "def indsWithin(vals, extr, edges=True):\n assert np.ndim(vals) == 1, \"Only `ndim = 1` arrays allowed!\"\n bnds = minmax(extr)\n if(edges):\n inds = np.where((vals >= bnds[0]) & (vals <= bnds[1]))[0]\n else:\n inds = np.where((vals > bnds[0]) & (vals < bnds[1]))[0]\n\n return inds", "def _xy_locs(mask):\n y, x = mask.nonzero()\n return list(zip(x, y))", "def linear_search_sentinal(lst, value):\n\n lst.insert(0, value)\n\n i = len(lst) - 1\n\n while lst[i] != value:\n i = i - 1\n\n lst.pop(0)\n\n if i == 0:\n return -1\n else:\n return i - 1", "def __find_index(arr, val):\n if val is not None:\n return numpy.searchsorted(arr, val)\n else:\n return val", "def getindex(self,name,searchfrom='name'):\n name = name.replace(':','_').lower()\n pat = re.compile(name)\n result = []\n\n for (i,elem) in enumerate(self.lat):\n if pat.search(elem[searchby]):\n result.append(i)\n return result", "def findidx(X, v, tol=1e-3):\n\tloc = -1\n\tdiff = 1e15 # Take a big difference\n\tn = len(X)\n\n\tfor i in xrange(n):\n\t\tndiff = abs(X[i]-v)\n\t\tif ndiff <= tol and ndiff < diff:\n\t\t\tloc = i\n\t\t\tdiff = ndiff\n\t\n\treturn loc", "def _subset(lst: list, val_filter: str) -> int:\n \n for i, v in enumerate(lst):\n if v == val_filter:\n return i", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def get_idx_from_sent(self, sent, word_idx_map, max_l=45, k=300, filter_h=5):\n x = []\n pad = filter_h - 1\n # for i in 
xrange(pad):\n # x.append(0)\n words = sent.split()\n for word in words:\n if word in word_idx_map:\n x.append(word_idx_map[word])\n if len(x)==max_l+pad:\n break\n while len(x) < max_l+2*pad:\n x.append(0)\n return x", "def find_offsets(haystack, needle):\n\toffs = -1\n\twhile True:\n\t\toffs = haystack.find(needle, offs+1)\n\t\tif offs == -1:\n\t\t\tbreak\n\t\telse:\n\t\t\tyield offs", "def binary_search(alist, target):\n index = binary_search_iterative(alist, target)\n return index", "def binary_search(self, num_lst, key):\r\n # Running time: O(log n) with O(n logn) overhead\r\n # get sorted list\r\n num_lst = sorted(num_lst)\r\n \r\n low, high, idx = 0, len(num_lst), -1\r\n \r\n while low < high:\r\n mid = int(math.floor((low+high) / 2.0))\r\n \r\n if key < num_lst[mid]: high = mid - 1\r\n elif key > num_lst[mid]: low = mid + 1\r\n elif key == num_lst[mid]: \r\n idx = mid\r\n return idx\r\n \r\n return idx", "def get_map_values(self, lons, lats, ibin=None):\n pix_idxs = self.get_pixel_indices(lons, lats, ibin)\n idxs = copy.copy(pix_idxs)\n\n m = np.empty_like(idxs[0], dtype=bool)\n m.fill(True)\n for i, p in enumerate(pix_idxs):\n m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])\n idxs[i][~m] = 0\n\n vals = self.counts.T[idxs]\n vals[~m] = np.nan\n return vals", "def __twoSum(self, numbers, target):\n dic = {}\n for i, value in enumerate(numbers):\n complement = target - value\n if complement in dic:\n return [dic[complement], i]\n else:\n # index the new value\n dic[value] = i", "def get_idx_set(i, sets):\n idxs = []\n for j, set_j in enumerate(sets):\n if i in set_j: idxs.append(j)\n return idxs", "def _get_indices_from_iss(self, iss):\n iss = [iss] if type(iss) not in [np.ndarray, list] else iss\n if self.iss is not None:\n inds = []\n for i in iss:\n inds.append(list(self.iss).index(i))\n# else:\n# inds = iss\n return inds", "def find_position(self, val):\n edges = np.array(self.cell_edges)\n if val in edges:\n index = np.searchsorted(edges, val)\n return index, index\n else:\n edges -= val\n if edges[0] > 0:\n return -1, 0\n if edges[-1] < 0:\n return 0, -1\n index = 0\n for i, e in enumerate(edges):\n if e > 0:\n index = i\n break\n return index - 1, index", "def linear_search_iterative(alist, target):\n index_target = None\n found = False\n index_current = 0\n while index_current < len(alist) and found is False:\n if alist[index_current] == target:\n index_target = index_current\n found = True\n index_current += 1\n return index_target" ]
[ "0.70793176", "0.7071109", "0.66156113", "0.6319743", "0.6250533", "0.620535", "0.62024206", "0.619781", "0.6169729", "0.60985184", "0.6077737", "0.6067603", "0.5907215", "0.5842827", "0.58379203", "0.5817054", "0.58101517", "0.5786621", "0.577902", "0.5734879", "0.5654707", "0.5616727", "0.55868834", "0.5581693", "0.5579649", "0.5515418", "0.5482658", "0.5481943", "0.54432505", "0.54349846", "0.5433607", "0.541198", "0.540692", "0.53990185", "0.5387827", "0.5384745", "0.53846455", "0.53846455", "0.53340465", "0.532639", "0.5321772", "0.53210115", "0.53143036", "0.5314129", "0.5304778", "0.5303014", "0.5294939", "0.5279905", "0.5277896", "0.5274131", "0.52718896", "0.52718896", "0.52714247", "0.52589935", "0.5243768", "0.523001", "0.52261263", "0.5225127", "0.5199031", "0.51928705", "0.5179667", "0.5179497", "0.51731926", "0.51698726", "0.5160939", "0.5160316", "0.51357085", "0.5130136", "0.5127168", "0.5125203", "0.51217115", "0.5116338", "0.510652", "0.5101254", "0.5095237", "0.50888777", "0.50886786", "0.5088214", "0.50826013", "0.5080806", "0.5080801", "0.5071751", "0.507141", "0.5069089", "0.506789", "0.50531656", "0.50515014", "0.5047353", "0.5045569", "0.5045092", "0.5040066", "0.5038577", "0.5034531", "0.50313795", "0.5028644", "0.5016542", "0.50148255", "0.50136113", "0.5007277", "0.50035566" ]
0.8087934
0
Find overall phi angle and z shift difference between two sets of projection parameters for helical structure. The two sets have to be of the same length and it is assumed that the k'th element on the first list corresponds to the k'th element on the second list.
def helical_consistency(p2i, p1):
	from pixel_error import angle_diff
	from math import cos,pi
	from utilities import getvec
	from pixel_error import angle_error
	from EMAN2 import Vec2f
	n =len(p1[0])
	print n
	qtm = -1.0e10
	for lf in xrange(0,181,180):
		p2 = []
		p2.extend(p2i)
		if( lf == 180):
			tflip = Transform({"type":"spider","theta":180.0})
			for j in xrange(n):
				t2 = Transform({"type":"spider","phi":p2[0][j],"theta":p2[1][j],"psi":p2[2][j]})
				t2.set_trans( Vec2f( -p2[3][j], -p2[4][j] ) )
				t2 = t2*tflip
				d = t2.get_params("spider")
				p2[0][j] = d["phi"]
				p2[1][j] = d["theta"]
				p2[2][j] = d["psi"]
				p2[3][j] = -d["tx"]
				p2[4][j] = -d["ty"]
		tt1 = [0.0]*n
		tt2 = [0.0]*n
		mirror = [False]*n
		ln = 0
		for j in xrange( n ):
			t1 = getvec(p1[0][j],p1[1][j])
			t2 = getvec(p2[0][j],p2[1][j])
			tm = getvec(180.0+p2[0][j],180.0-p2[1][j])
			tt1[j] = t1[0]*t2[0]+t1[1]*t2[1]+t1[2]*t2[2]
			tt2[j] = t1[0]*tm[0]+t1[1]*tm[1]+t1[2]*tm[2]
			if(abs(tt1[j])<1.0e-7): tt1[j] = 0.0
			if(abs(tt2[j])<1.0e-7): tt2[j] = 0.0
			if(tt1[j]>tt2[j]):
				mirror[j] = True
				ln+=1
		print " FLIP ",lf
		if(ln < n//2):
			print "mirror ",ln
			for j in xrange( n ):
				p2[0][j] += 180.0
				p2[1][j] = 180.0-p2[1][j]
				p2[2][j] = -p2[2][j]
				p2[4][j] = -p2[4][j]
				mirror[j] = not(mirror[j])
		else:
			print " straight", ln
		phi1 = []
		phi2 = []
		agree = []
		for j in xrange(n):
			if(mirror[j]):
				phi1.append(p1[0][j])
				phi2.append(p2[0][j])
				agree.append(j)
		print len(phi1)
		delta_phi = angle_diff( phi2, phi1 )
		print "close form diff===", delta_phi
		phi1 = []
		phi2 = []
		errorm = []
		for j in xrange( len( p1[0]) ):
			p2[0][j] = (p2[0][j] + delta_phi + 360)%360.0
			if(mirror[j]):
				phi1.append(p1[0][j])
				phi2.append(p2[0][j])
				errorm.append(angle_error( [ p2[0][j] ], [ p1[0][j] ]))
		qt = sum(errorm)/len(errorm)
		print len(errorm),qt
		if(qt > qtm):
			qtm = qt
			p2o = []
			p2o.extend(p2)
			errormo = []
			phi1o = []
			phi2o = []
			errormo.extend(errorm)
			phi1o.extend(phi1)
			phi2o.extend(phi2)
	return p2o, errormo, agree, delta_phi, phi1o, phi2o
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotation_between_anglesets(agls1, agls2):\n\tfrom math import sin, cos, pi, sqrt, atan2, acos, atan\n\tfrom numpy import array, linalg, matrix\n\timport types\n\n\tdeg2rad = pi/180.0\n\n\tdef ori2xyz(ori):\n\t\tif(type(ori) == types.ListType):\n\t\t\tphi, theta, psi = ori[:3]\n\t\telse:\n\t\t\t# it has to be Transformation object\n\t\t\td = ori.get_params(\"spider\")\n\t\t\tphi = d[\"phi\"]\n\t\t\ttheta = d[\"theta\"]\n\t\t\tpsi = d[\"psi\"]\n\t\t\"\"\"\n\t\t# This makes no sense here! PAP 09/2011\n\t\tif theta > 90.0:\n\t\t\tphi += 180.0\n\t\t\ttheta = 180.0-theta\n\t\t\"\"\"\n\t\tphi *= deg2rad\n\t\ttheta *= deg2rad\n\t\tx = sin(theta) * sin(phi)\n\t\ty = sin(theta) * cos(phi)\n\t\tz = cos(theta)\n\n\t\treturn [x, y, z]\n\n\tN = len(agls1)\n\tif N != len(agls2):\n\t\tprint 'Both lists must have the same length'\n\t\treturn -1\n\tif N < 2:\n\t\tprint 'At least two orientations are required in each list'\n\t\treturn -1\n\tU1, U2 = [], []\n\tfor n in xrange(N):\n\t\tp1 = ori2xyz(agls1[n])\n\t\tp2 = ori2xyz(agls2[n])\n\t\tU1.append(p1)\n\t\tU2.append(p2)\n\n\t# compute all Suv with uv = {xx, xy, xz, yx, ..., zz}\n\tSuv = [0] * 9\n\tc = 0\n\tnbori = len(U1)\n\tfor i in xrange(3):\n\t\tfor j in xrange(3):\n\t\t\tfor s in xrange(nbori):\n\t\t\t\tSuv[c] += (U2[s][i] * U1[s][j])\n\t\t\tc += 1\n\n # create matrix N\n\tN = array([[Suv[0]+Suv[4]+Suv[8], Suv[5]-Suv[7], Suv[6]-Suv[2], Suv[1]-Suv[3]], \n\t\t [Suv[5]-Suv[7], Suv[0]-Suv[4]-Suv[8], Suv[1]+Suv[3], Suv[6]+Suv[2]], \n\t\t [Suv[6]-Suv[2], Suv[1]+Suv[3], -Suv[0]+Suv[4]-Suv[8], Suv[5]+Suv[7]],\n\t\t [Suv[1]-Suv[3], Suv[6]+Suv[2], Suv[5]+Suv[7], -Suv[0]-Suv[4]+Suv[8]]])\n\n # eigenvector corresponding to the most positive eigenvalue\n\tval, vec = linalg.eig(N)\n\tq0, qx, qy, qz = vec[:, val.argmax()]\n\n # create quaternion Rot matrix \n\tr = [q0*q0-qx*qx+qy*qy-qz*qz, 2*(qy*qx+q0*qz), 2*(qy*qz-q0*qx), 0.0,\n\t 2*(qx*qy-q0*qz), q0*q0+qx*qx-qy*qy-qz*qz, 2*(qx*qz+q0*qy), 0.0,\n\t 2*(qz*qy+q0*qx), 2*(qz*qx-q0*qy), q0*q0-qx*qx-qy*qy+qz*qz, 0.0]\n\t\n\tR = Transform(r)\n\tdictR = R.get_rotation('SPIDER')\n\n\treturn dictR['phi'], dictR['theta'], dictR['psi']", "def visualSignMap(phasemap1, phasemap2):\r\n\r\n if phasemap1.shape != phasemap2.shape:\r\n raise LookupError(\"'phasemap1' and 'phasemap2' should have same size.\")\r\n\r\n gradmap1 = np.gradient(phasemap1)\r\n gradmap2 = np.gradient(phasemap2)\r\n\r\n # gradmap1 = ni.filters.median_filter(gradmap1,100.)\r\n # gradmap2 = ni.filters.median_filter(gradmap2,100.)\r\n\r\n graddir1 = np.zeros(np.shape(gradmap1[0]))\r\n # gradmag1 = np.zeros(np.shape(gradmap1[0]))\r\n\r\n graddir2 = np.zeros(np.shape(gradmap2[0]))\r\n # gradmag2 = np.zeros(np.shape(gradmap2[0]))\r\n\r\n for i in range(phasemap1.shape[0]):\r\n for j in range(phasemap2.shape[1]):\r\n graddir1[i, j] = math.atan2(gradmap1[1][i, j], gradmap1[0][i, j])\r\n graddir2[i, j] = math.atan2(gradmap2[1][i, j], gradmap2[0][i, j])\r\n\r\n # gradmag1[i,j] = np.sqrt((gradmap1[1][i,j]**2)+(gradmap1[0][i,j]**2))\r\n # gradmag2[i,j] = np.sqrt((gradmap2[1][i,j]**2)+(gradmap2[0][i,j]**2))\r\n\r\n vdiff = np.multiply(np.exp(1j * graddir1), np.exp(-1j * graddir2))\r\n\r\n areamap = np.sin(np.angle(vdiff))\r\n\r\n return areamap", "def common_line_in3D(phiA,thetaA,phiB,thetaB):\n\n\tfrom math import pi, sqrt, cos, sin, asin, atan2\n\n\tpiOver=pi/180.0;\n\tph1 = phiA*piOver; \n\tth1 = thetaA*piOver; \n\tph2 = phiB*piOver; \n\tth2 = thetaB*piOver;\n\t\n \t#nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;\n\t#ny = 
cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;\n\t#nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);\n\n\n\tnx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)\n\tny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)\n\tnz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)\n\n\tnorm = nx*nx + ny*ny + nz*nz\n \n\tif norm < 1e-5:\n\t\t#print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB\n\t\treturn 0.0, 0.0\n\n\tif nz<0: nx=-nx; ny=-ny; nz=-nz;\n\n\t#thetaCom = asin(nz/sqrt(norm))\n\tphiCom = asin(nz/sqrt(norm))\n\t#phiCom = atan2(ny,nx)\n\tthetaCom = atan2(ny, nx)\n\t\n\treturn phiCom*180.0/pi , thetaCom*180.0/pi", "def final_homography(pts1, pts2, feats1, feats2):\n\n #\n # Your code here\n #\n\n idxs1, idxs2 = find_matches(feats1, feats2)\n ransac_return = ransac(pts1[idxs1], pts2[idxs2])\n\n return ransac_return, idxs1, idxs2", "def compute_egocentric_delta(p1, r1, p2, r2):\n x1, y1, z1 = p1\n x2, y2, z2 = p2\n theta_1 = compute_heading_from_quaternion(r1)\n theta_2 = compute_heading_from_quaternion(r2)\n\n D_rho = math.sqrt((x1 - x2) ** 2 + (z1 - z2) ** 2)\n D_phi = (\n math.atan2(x2 - x1, -z2 + z1) - theta_1\n ) # counter-clockwise rotation about Y from -Z to X\n D_theta = theta_2 - theta_1\n\n return (D_rho, D_phi, D_theta)", "def compute_sign(k1, k2):\n\n def ordering_sign(permu, weights):\n \"\"\"Returns the exponent of the Koszul sign of the given\n permutation acting on the elements of degrees given by the\n list of weights\n\n \"\"\"\n sign_exp = 0\n for idx, j in enumerate(permu):\n to_add = [weights[permu.index(i)] for\n i in permu[idx + 1:] if i < j]\n sign_exp += weights[idx] * sum(to_add)\n return sign_exp % 2\n\n def action_sign(ordered_k1, ordered_weights):\n \"\"\"Given a ordered tuple [1,..,1, 2,...,2, ..., r,...,r]\n and weights [w_1, w_2, ..., w_{r+d}] of the same length, gives\n the koszul sign obtained by inserting from the left a weight 1\n operator between equal consecutive elements.\n\n \"\"\"\n sign_exp = 0\n for idx, (i, j) in enumerate(pairwise(ordered_k1)):\n if i == j:\n sign_exp += sum(ordered_weights[:idx + 1])\n return sign_exp % 2\n\n sign_exp = 0\n weights = [e.dimension % 2 for e in k2]\n inv_ordering_permu = [pair[0] for pair in\n sorted(enumerate(k1), key=itemgetter(1))]\n ordering_permu = tuple(inv_ordering_permu.index(i)\n for i in range(len(inv_ordering_permu)))\n sign_exp += ordering_sign(ordering_permu, weights)\n ordered_k1 = list(sorted(k1))\n ordered_weights = [weights[i] for i in inv_ordering_permu]\n sign_exp += action_sign(ordered_k1, ordered_weights)\n return (-1) ** sign_exp", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n 
set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def angle_hkls(self, h1, h2):\n h1v = norm_vec((vec(*h1).T * self.Bmat)).T\n h2v = norm_vec((vec(*h2).T * self.Bmat)).T\n return np.around(np.arccos(h1v.T*h2v)[0, 0] * degrees, 3)", "def test_two_qubit_weyl_decomposition_iswap(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, np.pi / 4, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def cphase(h1, h2):\n\n for h in (h1, h2):\n h.assert_ket_space()\n\n field = h1.base_field\n\n d = h1.dim()\n if h2.dim() != d:\n raise HilbertError('spaces must be of the same dimension')\n\n ret = (h1*h2).O.array()\n for (j, a) in enumerate(h1.index_iter()):\n for (k, b) in enumerate(h2.index_iter()):\n ret[{ h1: a, h1.H: a, h2: b, h2.H: b }] = field.fractional_phase(j*k, d)\n return ret", "def mat24_perm_from_heptads(h1, h2):\n # First find the special element of v h1 not contained in the octad\n v = 0\n for i in range(7):\n v ^= 1 << (h1[i] & 31)\n y = odd_syn(v)\n v = lsb24(v & y)\n \n # Find position y of element v in h1\n y = 0\n for i in range(7): \n y |= ((h1[i] != v) - 1) & i\n\n # Copy special element of h1 to position 8 of p1 and copy the other\n # elements of h1 to positions 0,...,6. Copy h2 similarly to p2\n p1 = h1[:7] + [None]*17\n p2 = h2[:7] + [None]*17\n p = [None] * 24\n p1[8] = p1[y]\n p1[y] = p1[6]\n p2[8] = p2[y]\n p2[y] = p2[6]\n\n # Complete p1 and p2 from heptad. 
Return error if any completion fails\n if mat24_complete_heptad(p1) | mat24_complete_heptad(p2):\n return None\n\n # If success, return p1**(-1) * p2\n for i in range(24):\n p[p1[i]] = p2[i]\n return p", "def phase_dist(phi1,phi2=None):\n shape = phi1.shape\n \n if phi2 is None:\n dist = np.abs(phi1).ravel()\n else:\n dist = np.abs(phi1-phi2).ravel()\n dist[dist>np.pi] = np.pi - dist[dist>np.pi]%np.pi\n return dist.reshape(shape)", "def deltaPhi(phi1, phi2):\n res = phi1 - phi2\n while res > math.pi:\n res -= 2*math.pi\n while res<-math.pi:\n res += 2*math.pi\n return res", "def get_distance(p1, p2):\n\n deg_rad = math.pi / 180\n\n dphi = p1[1] - p2[1]\n phim = 0.5 * (p1[1] + p2[1])\n dlam = p1[0] - p2[0]\n\n k1 = (111.13209 - 0.56605 * math.cos(2 * phim * deg_rad) + 0.00120 * \n math.cos(4 * phim * deg_rad))\n k2 = (111.41513 * math.cos(phim * deg_rad) - 0.09455 * \n math.cos(3 *phim * deg_rad) + 0.0012 * math.cos(5 * phim * deg_rad))\n\n return numpy.sqrt(k1**2 * dphi**2 + k2**2 * dlam**2)", "def diff_k_p2(p1, p2, t=0.):\n return p2", "def orientation(sign1L, sign2L):\n p_p = 0\n m_m = 0\n p_m = 0\n m_p = 0\n for index in range(len(sign1L)):\n sign1 = sign1L[index]\n sign2 = sign2L[index]\n if sign1 in [\"+\", \"-\"] and sign2 in [\"+\", \"-\"]:\n if sign1 == sign2:\n if sign1 == \"+\":\n p_p += 1\n elif sign1 == \"-\":\n m_m += 1\n else:\n if sign1 == \"+\" and sign2 == \"-\":\n p_m += 1\n elif sign1 == \"-\" and sign2 == \"+\":\n m_p += 1\n same_strand = p_p + m_m\n opposite_strand = p_m + m_p\n convergent = p_m\n divergent = m_p\n return p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent", "def phase_locking_value(z1, z2):\n\n assert len(z1) == len(z2), \"Signals must be same length! len(z1)=%d, len(z2)=%d\" % (len(z1), len(z2))\n N = len(z1)\n theta = np.angle(z2) - np.angle(z1)\n\n p = np.exp(complex(0, 1)*theta)\n plv = np.abs(p.sum()) / N\n\n return plv", "def rotacija_pravouglog_trougla_oko_hipotenuze(s2, s1):\r\n c = math.sqrt(s2 * s2 + s1 * s1)\r\n povrsina_trougla= (s2 * s1) / 2\r\n hc = (2 * povrsina_trougla) / c\r\n H1 = math.sqrt(s1 * s1 - hc * hc)\r\n H2 = math.sqrt(s2 * s2 - hc * hc)\r\n pi= 3.14\r\n povrsina = hc * pi * (s1 + s2)\r\n zapremina = (hc * hc * pi * (H1 + H2)) / 3\r\n return povrsina, zapremina", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def testCalculateRotationDiff(self):\n # Test identity\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertEqual(result, 0.0)\n # Test arbitrary rotation\n rot1 = numpy.array(\n [[0.0, 1.0, 
0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n rot2 = numpy.array(\n [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])\n transform1[0:3, 0:3] = numpy.matmul(transform1[0:3, 0:3], rot1)\n transform2[0:3, 0:3] = numpy.matmul(transform2[0:3, 0:3], rot2)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Order shouldn't matter\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Test when the angle is pi\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n transform2[0, 0] = -1.0\n transform2[1, 1] = -1.0\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n # It might wrap to -pi, so check the absolute value\n self.assertAlmostEqual(abs(result), numpy.pi, 8)\n # Test an extreme value\n transform2 = -1.0 * numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(abs(result), numpy.pi)", "def angle_difference(ang1,ang2,units):\n ang1r = angle_to_radians(ang1,units)\n ang2r = angle_to_radians(ang2,units)\n y = np.sin(ang2r-ang1r)\n x = np.cos(ang2r-ang1r)\n angdiffr = np.arctan2(y,x)\n return radians_to_angle(angdiffr,units)", "def get_diff_for_otu_maps(otu_map1, otu_map2):\r\n\r\n otus1 = set(otu_map1.keys())\r\n otus2 = set(otu_map2.keys())\r\n ids1 = set([x for otu in otus1 for x in otu_map1[otu]])\r\n ids2 = set([x for otu in otus2 for x in otu_map2[otu]])\r\n\r\n return ids1 - ids2, ids2 - ids1", "def intersection(self, pn1, pn2, h):\n #print \"intersectionection:\", pn1, pn2, h\n #print \"z: \", (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0]\n return (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0], h", "def diff_k_p1(p1, p2, t=0.):\n return p1", "def calc_dphi(phi1, phi2):\n twopi = 2.*np.pi\n # Map to 0..2pi range\n dphi = (phi1 - phi2) % twopi\n # Map pi..2pi --> -pi..0\n if is_array(dphi):\n dphi[dphi > np.pi] -= twopi\n elif dphi > np.pi:\n dphi -= twopi\n return dphi", "def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn", "def check_angles(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.HarmonicAngleForce, \"Error: forces must be HarmonicAngleForces\"\n\n n_angles0 = force0.getNumAngles()\n n_angles1 = force1.getNumAngles()\n\n dict0, dict1 = {}, {}\n\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(0)\n #unit_theta = theta0.unit\n unit_theta = u.degrees\n #unit_k = k0.unit\n unit_k = u.kilojoules_per_mole/(u.degrees)**2\n\n for k in range(n_angles0):\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict0[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n for k in range(n_angles1):\n i0, i1, i2, theta0, k0 = force1.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict1[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Angles0 - Angles1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Angles1 - Angles0 = %s\" % (keys1.difference(keys0)))\n diff_keys = keys0.symmetric_difference(keys1)\n assert diff_keys == set(), \"Systems have different HarmonicAngleForce entries: 
extra keys are: \\n%s\" % diff_keys\n\n for k, parameter_name in enumerate([\"theta0\", \"k0\"]):\n for (i0, i1, i2) in dict0.keys():\n val0 = dict0[i0, i1, i2][k]\n val1 = dict1[i0, i1, i2][k]\n if parameter_name=='theta0':\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has angle values of %f and %f degrees, respectively.\" % (i0, i1, i2, val0, val1)\n else:\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has force constant values of %f and %f kJ/(mol degree**2), respectively.\" % (i0, i1, i2, val0, val1)", "def get_angle(a: Keypoint, b: Keypoint, c: Keypoint) -> float:\n # get a vector with origin in (0,0) from points a and b by substracting Point a from Point b\n vector_a = keypoint_to_vector(a, b)\n vector_c = keypoint_to_vector(c, b)\n # https://de.wikipedia.org/wiki/Skalarprodukt => winkel phi = arccos(...)\n phi = np.arccos(np.dot(vector_a, vector_c) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_c)))\n angle_left_opening = np.cross(vector_a, vector_c) < 0\n return phi if angle_left_opening else -phi", "def test_angle_angle_equivalent(pcff):\n expected = {\n \"K\": \"5.9863\",\n \"reference\": \"6\",\n \"Theta10\": \"116.0640\",\n \"Theta20\": \"116.0640\",\n }\n\n i = \"c5\"\n j = \"cp\"\n k = \"c_1\"\n l = \"c5\" # noqa: E741\n ptype, key, form, parameters = pcff.angle_angle_parameters(i, j, k, l)\n assert ptype == \"equivalent\"\n assert key == (\"cp\", \"cp\", \"c_1\", \"cp\")\n assert parameters == expected", "def calcSum3(data1, data2):\n \n #SxW @high resolution\n data111 = data1[0][0]\n #SxW @low resolution\n data112 = data1[0][1]\n #phi @high resolution\n data121 = data1[1][0]\n #phi @low resolution\n data122 = data1[1][1]\n \n #SxW @high resolution\n data211 = data2[0][0]\n #SxW @low resolution\n data212 = data2[0][1]\n #phi @high resolution\n data221 = data2[1][0]\n #phi @low resolution\n data222 = data2[1][1]\n \n \n \n swh = [data111[i] + data211[i] for i in range(0,len(data111))]\n swl = [data112[i] + data212[i] for i in range(0,len(data112))]\n \n phih = [data121[i] + data221[i] for i in range(0,len(data121))]\n phil = [data122[i] + data222[i] for i in range(0,len(data122))]\n \n \n return [[swh, swl], [phih, phil]]", "def angle_difference(θ1, θ2):\n ordinary_diff = (θ2 - θ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))", "def compute_homography(pts1, pts2):\n\n #\n # Your code here\n #\n p1 = np.c_[pts1, np.ones(len(pts1))]\n p2 = np.c_[pts2, np.ones(len(pts2))]\n \n A = np.zeros((2 * p1.shape[0], 9))\n\n for i in range(0, 2 * p1.shape[0], 2):\n\n z = p2[i // 2]\n z_ = p1[i // 2]\n\n A[i][:3] = z_\n A[i + 1][3:6] = z_\n A[i][6:] = -z_ * z[0]\n A[i + 1][6:] = -z_ * z[1]\n \n _, _, Vh = np.linalg.svd(A.T.dot(A))\n V = Vh.T\n H = np.reshape(V[:, -1], (3, 3))\n\n if np.linalg.norm(H) != 1.:\n H /= np.linalg.norm(H)\n \n return H", "def t_gcdist(lat1, lon1, lat2, lon2):\n raddeg = 180 / pi\n degrad = 1 / raddeg\n # convert latitude and longitude to radians\n lat1 = lat1 * degrad\n lat2 = lat2 * degrad\n in1 = np.flatnonzero(lon1 > 180)\n lon1[(in1 -1)] = lon1[(in1 -1)] - 360\n in2 = np.flatnonzero(lon2 > 180)\n lon2[(in2 -1)] = lon2[(in2 -1)] - 360\n lon1 = - lon1 * degrad\n lon2 = - lon2 * degrad\n # calculate some basic functions\n coslat1 = cos(lat1)\n sinlat1 = sin(lat1)\n coslat2 = cos(lat2)\n sinlat2 = sin(lat2)\n #calculate distance on unit sphere\n dtmp = cos(lon1 - lon2)\n dtmp = sinlat1 * sinlat2 + coslat1 * coslat2 * dtmp\n # check for invalid values due to roundoff errors\n in1 = 
np.flatnonzero(dtmp > 1.0)\n dtmp[(in1 -1)] = 1.0\n in2 = np.flatnonzero(dtmp < - 1.0)\n dtmp[(in2 -1)] = - 1.0\n # convert to meters for earth distance\n ad = acos(dtmp)\n d = (111.112) * raddeg * ad\n # now find heading\n hdgcos = (sinlat2 - sinlat1 * cos(ad)) / (sin(ad) * coslat1)\n # check value to be legal range\n in1 = np.flatnonzero(hdgcos > 1.0)\n hdgcos[(in1 -1)] = 1.0\n in2 = np.flatnonzero(hdgcos < - 1.0)\n hdgcos[(in2 -1)] = - 1.0\n hdg = acos(hdgcos) * raddeg\n # if longitude is decreasing then heading is between 180 and 360\n test = sin(lon2 - lon1)\n in1 = np.flatnonzero(test > 0.0)\n hdg[(in1 -1)] = 360 - hdg[(in1 -1)]\n return d, hdg", "def getangle(p1, p2):\n\treturn atan2( p2[1]-p1[1], p2[0]-p1[0] )", "def test_two_qubit_weyl_decomposition_swap(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, np.pi / 4, np.pi / 4)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def wigner_gaunt(l1, l2, m):\n pref = sqrt((2*l1 + 1)*(2*l2 + 1)/(4*pi))\n return np.array([pref*sqrt(2*lpp + 1)*float(wigner_3j(l1,l2,lpp,m,-m,0)*wigner_3j(l1,l2,lpp,0,0,0))\n for lpp in range(abs(l1-l2), l1+l2+1, 2)])", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def test_two_qubit_weyl_decomposition_bgate(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, np.pi / 8, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def _diff_pot(a2,t2,d2,wair,temp,pres,ppot,airf,dhum):\n ph2 = _eq_pressure(0,0,0,a2,t2,d2)\n gi2 = _ice_g(0,0,t2,ppot)\n gv2 = _eq_vappot(0,0,0,a2,t2,d2)\n sh1 = -_air_f(0,1,0,airf,temp,dhum)\n si1 = -_ice_g(1,0,temp,pres)\n s1 = wair/airf*sh1 + (1-wair/airf)*si1\n sh2 = -_air_f(0,1,0,a2,t2,d2)\n si2 = -_ice_g(1,0,t2,ppot)\n s2 = wair/a2*sh2 + (1-wair/a2)*si2\n lhs = numpy.array([ppot, gi2, s1])\n rhs = numpy.array([ph2, gv2, s2])\n \n ph2_a = _eq_pressure(1,0,0,a2,t2,d2)\n ph2_t = _eq_pressure(0,1,0,a2,t2,d2)\n ph2_d = _eq_pressure(0,0,1,a2,t2,d2)\n gi2_t = _ice_g(1,0,t2,ppot)\n gv2_a = _eq_vappot(1,0,0,a2,t2,d2)\n gv2_t = _eq_vappot(0,1,0,a2,t2,d2)\n gv2_d = _eq_vappot(0,0,1,a2,t2,d2)\n sh2_a = -_air_f(1,1,0,a2,t2,d2)\n sh2_t = -_air_f(0,2,0,a2,t2,d2)\n sh2_d = -_air_f(0,1,1,a2,t2,d2)\n si2_t = -_ice_g(2,0,t2,ppot)\n s2_a = -wair/a2**2*(sh2 - a2*sh2_a - si2)\n s2_t = wair/a2*sh2_t + (1-wair/a2)*si2_t\n s2_d = wair/a2*sh2_d\n dlhs = numpy.array([[0.,0.,0.], [0.,gi2_t,0.], [0.,0.,0.]])\n drhs = numpy.array([[ph2_a,ph2_t,ph2_d], [gv2_a,gv2_t,gv2_d],\n [s2_a,s2_t,s2_d]])\n return lhs, rhs, dlhs, drhs", "def match_objects(coords1,coords2,tail1=(),tail2=(),accuracy=1.):\n acc2=accuracy**2\n nc=len(coords1)\n 
np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n if dist[i_min]<acc2:match[j]=i_min\n good=greater_equal(match,0)\n n1=compress(good,list(range(np1))) \n match=compress(good,match)\n a1=compress(good,a1)\n salida=list(a1)\n for i in range(nt1):\n if type(tail1[i][0])==type('si'):\n t=[]\n for j in n1: t.append(tail1[i][j])\n else:\n t=take(tail1[i],n1)\n salida.append(t)\n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n return salida", "def stitch(KPS1, KPS2, H1, H2, match): #---- stich image to previous one\r\n #--- projection image1 from plane to cylindrical ---\r\n total = np.minimum(match.shape[0],100); # total pairing number\r\n bin1 = match[0:total,0].astype(int); # feature no at image 1\r\n R1 = KPS1.keyz[bin1, 0]; # keypoint Y at image 1\r\n C1 = KPS1.keyz[bin1, 1]; # keypoint X at image 1\r\n V1, U1 = pano_tools.project_p2c_points(R1, C1, H1);\r\n #--- image 2 ---\r\n bin2 = match[0:total,1].astype(int); # feature no at image 2\r\n R2 = KPS2.keyz[bin2, 0]; # keypoint Y at image 2\r\n C2 = KPS2.keyz[bin2, 1]; # keypoint X at image 2\r\n Rc2 = H2[0]/2; Rp2= R2 - Rc2; \r\n Cc2 = H2[1]/2; Cp2= C2 - Cc2;\r\n #--- --- \r\n # {phi1,S1,TU1,TV1} = M*M matrix: which is derived by chosen 2 pairs \r\n # {phi0,S0,TU0,TV0} = scalar: which is initial guess by removing outlier\r\n # \r\n phi1,S1,TU1,TV1= pano_tools.derive_p2c_formula(U1,V1,Cp2,Rp2);\r\n seq,phi0,S0,TU0,TV0 = pano_tools.remove_ill_matched_pair(phi1,S1,TU1,TV1); \r\n #--- linear regression [not necessary] ---\r\n # U1X = U1[seq]; C2X = C2[seq]; V1X = V1[seq]; R2X = R2[seq]; \r\n # phi0,S0,TU0,TV0,Err= pano_tools.linear_regression(V1X,U1X,R2X,C2X, phi0,S0,TU0,TV0,H2)\r\n H2[3]= phi0; H2[4]= S0; H2[5]= TV0; H2[6]= TU0;", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran 
math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def angle_maps(gamma, delta, ci, cj, w, h, SDD, pxl_size, ph):\n gamma_map = np.empty((h,w)) # initialize detector gamma map\n delta_map = np.empty((h,w)) # initialize detector delta map\n d = np.empty((h,w)) # initialize detector distance map\n corr_i = np.empty((h,w)) # initialize flat detector correction map\n g_offset = (-1.08435537e-6*gamma**2 - \n 0.00084077357*gamma - \n 0.0128920777) # gamma offset (calibration)\n gamma += g_offset # correct gamma position\n d_offset = (1.7280529238e-6*delta**2 - \n 0.000700361461*delta - \n 0.00367551936) # delta offset (calibration)\n delta += d_offset # correct delta position\n nom_gamma = np.deg2rad(gamma) # convert nominal det angles to [rad]\n nom_delta = np.deg2rad(delta) # convert nominal det angles to [rad]\n GAM = np.array([[np.cos(nom_gamma),np.sin(nom_gamma),0], \n [-np.sin(nom_gamma), np.cos(nom_gamma),0], \n [0,0,1]]) # \\Gamma rotational matrix\n DEL = np.array([[1,0,0], # \\Delta rotational matrix\n [0,np.cos(nom_delta),-np.sin(nom_delta)], \n [0,np.sin(nom_delta),np.cos(nom_delta)]])\n rot_mat = np.matmul(GAM,DEL) # multiply rotational matrices\n for j in range(h):\n dz = (cj - j)*pxl_size # delta z (z-distance from det. center)\n for i in range(w):\n dx = (ci - i)*pxl_size # delta x (x-distance from det. center)\n di = np.sqrt(dx**2 + SDD**2 + dz**2) # sample-to-pixel distance\n dr = np.sqrt(dx**2 + dz**2) # center-to-pixel distance\n p = np.array([dx, SDD, dz]) # central pixel position at\n # zero angles in the lab coordinates\n (xp, yp, zp) = np.matmul(rot_mat, p) # central pixel position at\n # nominal detector angle\n gamma_map[j][i] = np.arctan(xp/yp) # map of gamma pixel values\n delta_map[j][i] = np.arcsin(zp/di) # map of delta pixel values\n d[j][i] = di # map of SDD distances\n corr_i[j][i] = 1/(np.cos(np.arctan(dr/SDD))) # flat det. corr.\n corr_d = np.power(d,2)/np.power(SDD,2) # flat det. corr.\n chi = np.arctan(np.tan(delta_map)/np.tan(gamma_map)) # map of chi\n Phor = (1 - \n np.power(np.sin(gamma_map),2)) # horizontal component of \n # polarization correction\n Pver = (1 - \n np.power(np.sin(delta_map)*np.cos(gamma_map),2)) # vertical comp.\n # of polarization correction\n P = ph*Phor + (1-ph)*Pver # polarization correction\n tth = np.arccos(np.cos(delta_map)*np.cos(gamma_map)) # 2th map\n L = 1/(np.sin(tth/2)*np.sin(tth)) # Lorentz correction\n flat = corr_i * corr_d # flat det. 
correction\n PL = P * L * flat # multiply corrrections\n return tth, chi, PL", "def ang_diff(self, theta1, theta2):\n\n return (theta1 - theta2 + np.pi) % (2 * np.pi) - np.pi", "def algdelta(alg1, alg2, *args):\n file_delta = ord(alg2[0]) - ord(alg1[0])\n rank_delta = ord(alg2[1]) - ord(alg1[1])\n return file_delta, rank_delta", "def delta_r(o1, o2):\n d_phi = o1.phi - o2.phi\n if d_phi < -pi: d_phi += pix2\n if d_phi > pi: d_phi -= pix2\n return hypot(o1.eta - o2.eta, d_phi)", "def get_theta(p1,p2):\r\n \r\n dy = p1[1] - p2[1]\r\n dx = p1[0] - p2[0]\r\n theta = atan2(dy,dx)\r\n return theta", "def distance_polytopes(Q1,Q2,ball=\"infinity\",solver=\"gurobi\"):\n Q1,Q2=pp.to_AH_polytope(Q1),pp.to_AH_polytope(Q2)\n n=Q1.n\n prog=MP.MathematicalProgram()\n zeta1=prog.NewContinuousVariables(Q1.P.H.shape[1],1,\"zeta1\")\n zeta2=prog.NewContinuousVariables(Q2.P.H.shape[1],1,\"zeta2\")\n delta=prog.NewContinuousVariables(n,1,\"delta\")\n prog.AddLinearConstraint(A=Q1.P.H,ub=Q1.P.h,lb=-np.inf*np.ones((Q1.P.h.shape[0],1)),vars=zeta1)\n prog.AddLinearConstraint(A=Q2.P.H,ub=Q2.P.h,lb=-np.inf*np.ones((Q2.P.h.shape[0],1)),vars=zeta2)\n prog.AddLinearEqualityConstraint( np.hstack((Q1.T,-Q2.T,np.eye(n))),Q2.t-Q1.t,np.vstack((zeta1,zeta2,delta)) )\n if ball==\"infinity\":\n delta_abs=prog.NewContinuousVariables(1,1,\"delta_abs\")\n prog.AddBoundingBoxConstraint(0,np.inf,delta_abs)\n prog.AddLinearConstraint(np.greater_equal( np.dot(np.ones((n,1)),delta_abs),delta,dtype='object' ))\n prog.AddLinearConstraint(np.greater_equal( np.dot(np.ones((n,1)),delta_abs),-delta,dtype='object' ))\n cost=delta_abs\n elif ball==\"l1\":\n delta_abs=prog.NewContinuousVariables(n,1,\"delta_abs\")\n prog.AddBoundingBoxConstraint(0,np.inf,delta_abs)\n prog.AddLinearConstraint(np.greater_equal( delta_abs,delta,dtype='object' ))\n prog.AddLinearConstraint(np.greater_equal( delta_abs,-delta,dtype='object' ))\n cost=np.dot(np.ones((1,n)),delta_abs)\n else:\n raise NotImplementedError\n if solver==\"gurobi\":\n prog.AddLinearCost(cost[0,0])\n result=gurobi_solver.Solve(prog,None,None)\n elif solver==\"osqp\":\n prog.AddQuadraticCost(cost[0,0]*cost[0,0])\n result=OSQP_solver.Solve(prog,None,None)\n else:\n prog.AddLinearCost(cost[0,0])\n result=MP.Solve(prog)\n if result.is_success():\n return np.sum(result.GetSolution(delta_abs)),\\\n np.dot(Q1.T,result.GetSolution(zeta1).reshape(zeta1.shape[0],1))+Q1.t,\\\n np.dot(Q2.T,result.GetSolution(zeta2).reshape(zeta2.shape[0],1))+Q2.t", "def semitone_difference(diatonic_pitch_a, diatonic_pitch_b):\n index_a = DiatonicFoundation.get_chromatic_distance(diatonic_pitch_a)\n index_b = DiatonicFoundation.get_chromatic_distance(diatonic_pitch_b)\n if index_a == -1 or index_b == -1:\n raise Exception('Illegal pitch specified')\n return index_a - index_b", "def copolymer_density(M_a, M_b, basis, L_A, L_B, fraction_A, fraction_B, xpts,\n R, spherical, basis2=None):\n nxpts = basis.shape[0]\n if not is_symmetric(M_a):\n raise ValueError(\"M_a must be symmetric\")\n if not is_symmetric(M_b):\n raise ValueError(\"M_b must be symmetric\")\n Lam_A, Q_A = np.linalg.eig(M_a)\n Lam_B, Q_B = np.linalg.eig(M_b)\n\n MMA = propagator(Lam_A, L_A, Q_A)\n MMB = propagator(Lam_B, L_B, Q_B)\n\n lam, alpha_B = sorted_eig(np.dot(MMB,MMA), top=True)\n if np.abs(np.imag(lam)) > 10**-9:\n raise ValueError(\"Complex eigenvalue. Answer shouldn't occolate\")\n lam, alpha_A = sorted_eig(np.dot(MMA,MMB), top=True)\n if np.abs(np.imag(lam)) > 10**-9:\n raise ValueError(\"Complex eigenvalue. 
Answer shouldn't occolate\")\n if np.max(np.abs(np.imag(alpha_A))) > 10**-8:\n raise ValueError(\"Complex eigenvector?\")\n if np.max(np.abs(np.imag(alpha_B))) > 10**-8:\n raise ValueError(\"Complex eigenvector?\")\n alpha_A = np.real(alpha_A)\n alpha_B = np.real(alpha_B)\n\n QEQ_A = QEQ(Q_A, alpha_B, Lam_A, L_A)\n QEQ_B = QEQ(Q_B, alpha_A, Lam_B, L_B)\n\n phi_a = np.zeros(nxpts)\n phi_b = np.zeros(nxpts)\n if spherical:\n if type(basis2) == type(None):\n basis2 = calcBasis2(xpts, basis_size, R)\n #seq = np.arange(basis.shape[1]) + 1\n for ix in range(0,nxpts):\n if spherical:\n #s = basis[ix,:] * (R/xpts[ix])\n #s = seq*spherical_jn(0,seq*np.pi*xpts[ix]/R)\n s = basis2[ix,:]\n else:\n s = basis[ix,:]\n phi_a[ix] = np.linalg.multi_dot([s,QEQ_A,s])\n phi_b[ix] = np.linalg.multi_dot([s,QEQ_B,s])\n\n phi_a = normalize(phi_a, fraction_A, xpts, R, spherical)\n phi_b = normalize(phi_b, fraction_B, xpts, R, spherical)\n\n return [phi_a, phi_b]", "def error_metric(phi_1, phi_2, spherical=False, xpts=None):\n if spherical:\n return sum(abs(phi_1-phi_2)*(xpts**2))/(2.0*sum(abs(phi_1)*(xpts**2)))\n else:\n return sum(abs(phi_1-phi_2))/(2.0*sum(phi_1))", "def trimdynamic_pe(records1, records2, args):\n for rec1, rec2 in izip(records1, records2):\n cutpos1 = 0\n cutpos2 = 0\n tmp_qual1 = [0 if x < args.q else 1 for x in rec1.letter_annotations['phred_quality']]\n tmp_qual1.append(0)\n jumps1 = [i for i, x in enumerate(tmp_qual1[:len(tmp_qual1) - 1]) if [tmp_qual1[i], tmp_qual1[i + 1]] == [1, 0]]\n if len(jumps1) == 0:\n cutpos1 = 0\n if len(jumps1) != 0:\n cutpos1 = numpy.max(jumps1) + 1\n rec1 = rec1[:cutpos1]\n tmp_qual2 = [0 if x < args.q else 1 for x in rec2.letter_annotations['phred_quality']]\n tmp_qual2.append(0)\n jumps2 = [i for i, x in enumerate(tmp_qual2[:len(tmp_qual2) - 1]) if [tmp_qual2[i], tmp_qual2[i + 1]] == [1, 0]]\n if len(jumps2) == 0:\n cutpos2 = 0\n if len(jumps2) != 0:\n cutpos2 = numpy.max(jumps2) + 1\n rec2 = rec2[:cutpos2]\n if args.r:\n rec1 = rec1.reverse_complement(name=True,id=True,description=True)\n rec2 = rec2.reverse_complement(name=True,id=True,description=True)\n if args.d:\n rec1.name += '/1'\n rec1.id += '/1'\n rec1.description += '/1'\n rec2.name += '/2'\n rec2.id += '/2'\n rec2.description += '/2'\n y1 = False\n y2 = False\n if len(rec1) >= args.m and numpy.mean(rec1.letter_annotations['phred_quality']) >= args.a:\n y1 = True\n if len(rec2) >= args.m and numpy.mean(rec2.letter_annotations['phred_quality']) >= args.a:\n y2 = True\n if y1 and y2:\n yield rec1, None, rec2, None, 'pe'\n if y1 and not y2:\n yield None, rec1, None, None, 'se1'\n if not y1 and y2:\n yield None, None, None, rec2, 'se2'", "def test_correct_center_order2(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 2, \"center\")\r\n assert np.allclose(coeffs, [-0.5, 0.5])\r\n assert np.allclose(shifts, [-1, 1])", "def _kendall_tau_diff(self, a: np.ndarray, b: np.ndarray, i) -> Tuple[int, int]:\n # compute ordering relation of the single points a[i] and b[i]\n # with all previous points of the sequences a and b, respectively\n a_pos = np.zeros(a[:i].size, dtype=int)\n a_pos[a[:i] > a[i]] = 1\n a_pos[a[:i] < a[i]] = -1\n b_pos = np.zeros(b[:i].size, dtype=int)\n b_pos[b[:i] > b[i]] = 1\n b_pos[b[:i] < b[i]] = -1\n diff_pos = np.dot(a_pos, b_pos).astype(float)\n\n # compute ordering relation of the single points a[i] and b[i]\n # with all later points of the sequences\n a_neg = np.zeros(a[i:].size, dtype=int)\n a_neg[a[i:] > a[i]] = 1\n a_neg[a[i:] < a[i]] = -1\n b_neg = np.zeros(b[i:].size, 
dtype=int)\n b_neg[b[i:] > b[i]] = 1\n b_neg[b[i:] < b[i]] = -1\n diff_neg = np.dot(a_neg, b_neg)\n\n return diff_pos, diff_neg", "def getSmoothnessABC(self,other):\n\t\tif not self.sectors == other.sectors:\n\t\t\traise ValueError(\"Sectors do not match\")\n\t\ttotalNzero = self.nZero + other.nZero\n\n\t\tDself = np.zeros((2*self.totalBins))\n\t\tDother = np.zeros((2*other.totalBins))\n\t\tZself = np.zeros((2*totalNzero, 2*self.totalBins))\n\t\tZother = np.zeros((2*totalNzero, 2*other.totalBins))\n\t\tfor s in range(self.nSect):\n\t\t\tstartSelf = self.borders[s]\n\t\t\tstartOther = other.borders[s]\n\t\t\tnBins = min(self.borders[s+1] - self.borders[s], other.borders[s+1] - other.borders[s])\n\t\t\tfor i in range(nBins):\n\t\t\t\tdelRe = self.reals[startSelf+i] - other.reals[startOther+i]\n\t\t\t\tdelIm = self.imags[startSelf+i] - other.imags[startOther+i]\n\t\t\t\tDself[2*(startSelf + i) ] = delRe\n\t\t\t\tDself[2*(startSelf + i)+1] = delIm\n\t\t\t\tDother[2*(startOther + i) ] = delRe\n\t\t\t\tDother[2*(startOther + i)+1] = delIm\n\t\t\t\tfor z in range(self.nZero):\n\t\t\t\t\tzeroVal = self.zeroModes[z][startSelf+i]\n\t\t\t\t\tZself[2*z ,2*(startSelf + i) ] = zeroVal\n\t\t\t\t\tZself[2*z+1,2*(startSelf + i)+1 ] = zeroVal\n\t\t\t\t\tZother[2*z ,2*(startOther + i) ] = zeroVal\n\t\t\t\t\tZother[2*z+1,2*(startOther + i)+1] = zeroVal\n\t\t\t\tz0 = self.nZero\n\t\t\t\tfor z in range(other.nZero):\n\t\t\t\t\tzeroVal = -other.zeroModes[z][startOther+i]\n\t\t\t\t\tZself[2*(z+z0) , 2*(startSelf + i) ] = zeroVal\n\t\t\t\t\tZself[2*(z+z0)+1 , 2*(startSelf + i)+1] = zeroVal\n\t\t\t\t\tZother[2*(z+z0) , 2*(startOther+ i) ] = zeroVal\n\t\t\t\t\tZother[2*(z+z0)+1, 2*(startOther+ i)+1] = zeroVal\n\t\tDCself = np.dot(Dself, self.comaInv)\n\t\tDCother = np.dot(Dother, other.comaInv)\n\n\t\tCself = np.dot(Dself, DCself)\n\t\tCother = np.dot(Dother, DCother)\n\t\tBself = np.dot(Zself, DCself)\n\t\tBother = np.dot(Zother, DCother)\n\t\tAself = np.dot(Zself, np.transpose(np.dot(Zself, self.comaInv)))\n\t\tAother = np.dot(Zother, np.transpose(np.dot(Zother, other.comaInv)))\n\t\treturn Aself+Aother, 2*(Bself + Bother), Cself + Cother # A factor 2 was missing inB", "def volume_similarity_pd(pd1,pd2):\n\tvolume_similarity = {}\n\n\t# print(\"aaaaa\")\n\n\t# union = vtk.vtkBooleanOperationPolyDataFilter()\n\t# union.SetOperationToDifference()\n\t# union.SetInputData(0,pd1)\n\t# union.SetInputData(1,pd2)\n\t# union.Update()\n\t# u = union.GetOutput()\n\n\t# massUnion = vtk.vtkMassProperties()\n\t# massUnion.SetInputData(u)\n\n\t# intersection = vtk.vtkBooleanOperationPolyDataFilter()\n\t# intersection.SetOperationToIntersection()\n\t# intersection.SetInputData(0,pd1)\n\t# intersection.SetInputData(1,pd2)\n\t# intersection.Update()\n\t# i = intersection.GetOutput()\n\t# massIntersection = vtk.vtkMassProperties()\n\t# massIntersection.SetInputData(i)\n\n\t# # metrics\n\t# tqdm.write(\"intersection vol: {:.2f}\".format(massIntersection.GetVolume()))\n\t# tqdm.write(\"union vol: {:.2f}\".format(massUnion.GetVolume()))\n\n\t# volume_similarity[\"jaccard\"] = 1 - massIntersection.GetVolume()/massUnion.GetVolume()\n\n\t# tqdm.write(\"Jaccard distance: {:.2f}\".format(volume_similarity[\"jaccard\"]))\n\n\thausdorffDistFilter = vtk.vtkHausdorffDistancePointSetFilter()\n\thausdorffDistFilter.SetInputData(0, pd1)\n\thausdorffDistFilter.SetInputData(1, pd2)\n\thausdorffDistFilter.Update()\n\n\tvolume_similarity[\"hausdorff\"] = hausdorffDistFilter.GetHausdorffDistance()\n\tvolume_similarity[\"relative0\"] = 
hausdorffDistFilter.GetRelativeDistance()[0]\n\tvolume_similarity[\"relative1\"] = hausdorffDistFilter.GetRelativeDistance()[1]\n\ttqdm.write(\"Hausdorff distance: {:.2f} mm\".format(volume_similarity[\"hausdorff\"]))\n\ttqdm.write(\"Relative distance from pd1 to pd2: {:.2f} mm\".format(volume_similarity[\"relative0\"]))\n\ttqdm.write(\"Relative distance from pd2 to pd1: {:.2f} mm\".format(volume_similarity[\"relative1\"]))\n\n\treturn volume_similarity, hausdorffDistFilter.GetOutput(0), hausdorffDistFilter.GetOutput(1)", "def separations(self, g1, g2):\n\n results = {}\n com1 = CenterOfMass(self.galaxies[g1], 2)\n com2 = CenterOfMass(self.galaxies[g2], 2)\n com1_p = com1.com_p()\n com2_p = com2.com_p()\n com1_v = com1.com_v(com1_p)\n com2_v = com2.com_v(com2_p)\n \n results['pos_xyz'] = com2_p - com1_p\n results['vel_xyz'] = com2_v - com1_v\n results['r'] = np.round(norm(results['pos_xyz']), 2)\n results['r_hat'] = np.round(results['pos_xyz'] / results['r'], 2) # unit vector\n results['vel_mag'] = np.round(norm(results['vel_xyz']), 2)\n results['v_radial'] = np.round(np.dot(results['r_hat'], results['vel_xyz']), 2)\n results['v_tangential'] = np.round(np.cross(results['r_hat'], results['vel_xyz']), 2)\n results['v_tan_mag'] = np.round(norm(results['v_tangential']), 2)\n\n return results", "def _anti_commuting_products(q_1: Q, q_2: Q) -> Dict:\n\n s_x, s_y, s_z = q_1.x, q_1.y, q_1.z\n q_2_x, q_2_y, q_2_z = q_2.x, q_2.y, q_2.z\n\n dif_dict = {\n \"yz-zy\": s_y * q_2_z - s_z * q_2_y,\n \"zx-xz\": s_z * q_2_x - s_x * q_2_z,\n \"xy-yx\": s_x * q_2_y - s_y * q_2_x,\n \"zy-yz\": -s_y * q_2_z + s_z * q_2_y,\n \"xz-zx\": -s_z * q_2_x + s_x * q_2_z,\n \"yx-xy\": -s_x * q_2_y + s_y * q_2_x,\n }\n\n return dif_dict", "def subtract_poly(a_remote, b_remote, m):\n # Copy the two arrays\n a = a_remote.copy()\n b = b_remote.copy()\n\n # Make the length the same\n sanitize_arrays(a, b)\n\n result = []\n # Subtract numbers with same index and modulo m them. 
Add to result\n for x, y in zip(a, b):\n result.append((x - y) % m)\n\n return clear_leading_zeroes(result)", "def associate(first_list, second_list, offset, max_difference):\n ## obatin all keys\n first_keys = list(first_list)\n second_keys = list(second_list)\n potential_matches = [(abs(a - (b + offset)), a, b)\n for a in first_keys\n for b in second_keys\n if abs(a - (b + offset)) < max_difference]\n potential_matches.sort()\n matches = []\n for diff, a, b in potential_matches:\n if a in first_keys and b in second_keys:\n first_keys.remove(a)\n second_keys.remove(b)\n matches.append((a, b))\n \n matches.sort()\n return matches", "def angle_difference(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['angle_difference']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for label in self.uuid_map:\n if label == 'LSTATE' or 'MAG' in label:\n continue\n distillate_label = get_distillate_label([label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_ref_label = \"{0} {1}\".format(label, self.ref_name)\n dep_ref_name = fields['deps'][0]\n dep_ref_uuid = self.reference_uuid_map[label]\n dep_label = \"{0} {1}\".format(label, self.name)\n dep_name = fields['deps'][1]\n dep_uuid = self.uuid_map[label]\n deps = [[dep_ref_label, dep_ref_name, dep_ref_uuid], [dep_label, dep_name, dep_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}/{3}\".format(self.location, self.ref_name, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"ANGLE-DIFF\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label] = emitted[-2][-36:]\n\n filename = \"{0}/ANG-DIFF_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def distcalc(z,h=0.70,omegalambda=0.7,omegam=0.3,omegak=0.0):\n\n H0 = 100 * h # this is in units of km/s/Mpc\n\n H0freq = H0 * constants.kilo/(constants.mega * constants.parsec) # this is H0 is units of Hz\n \n hubbletime = 1.0/H0freq # in seconds\n hubbletimeyr = hubbletime / constants.year\n\n #hubble distance\n dh = constants.c / H0freq # in meters\n\n #now i can calculate the comoving distance (line of sight) using hogg eqn 15\n dc = dh * integrate.quad(dcintegrand,0,z,(omegalambda,omegam,omegak))[0]\n\n #now i can find the transverse comoving distance using hogg eqn 16\n if omegak == 0:\n dm = dc\n elif omegak > 0:\n dm = dh/np.sqrt(omegak) * np.sinh(dc * np.sqrt(omegak) / dh)\n else:\n dm = dh/np.sqrt(abs(omegak)) * np.sin(dc * np.sqrt(abs(omegak)) / dh)\n\n\n #now i will calculate the angular diameter distance (hogg eqn 18)\n da = dm/(1+z)\n \n #now i will calculate scale in kpc/arcsec, since this is commonly used\n scale = da * constants.arcsec / (constants.kilo * constants.parsec)\n\n #now i will calculate the luminosity distance (hog eqn 21)\n dl = (1+z)*dm\n \n #now i will calculate lookback time and \n #time from the begining of the universe to that redshift using hogg eqn 30\n \n tlookback = hubbletimeyr * integrate.quad(timeintegrand,0,z,(omegalambda,omegam,omegak))[0]\n \n tz = hubbletimeyr * integrate.quad(timeintegrand,z,np.inf,(omegalambda,omegam,omegak))[0]\n \n #all sky co-moving volume out to redshift z (hogg eqn 30)\n if omegak == 
0:\n vc = 4 * np.pi * dm**3 / 3\n elif omegak > 0:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsinh( np.sqrt(omegak) * dm / dh ) / np.sqrt(omegak) )\n else:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsin( np.sqrt(abs(omegak)) * dm / dh ) / np.sqrt(abs(omegak)) )\n\n #for output, i will make a dictionary\n output = dict(dh=dh, dc=dc, dm=dm, da=da, scale=scale, dl=dl, tlookback = tlookback, tz=tz, vc=vc)\n\n return output", "def get_phi_kappa_omega(self, angles):\n (phi) = angles[0]\n (kappa) = angles[1]\n (omega) = angles[2]\n return (phi, kappa, omega)", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def report_sim_difference(sim0: rebound.Simulation, sim1: rebound.Simulation, \n object_names: List[str], verbose: bool=False) -> \\\n Tuple[np.array, np.array]:\n # Extract configuration arrays for the two simulations\n cfg0: np.array = sim_cfg_array(sim0, object_names)\n cfg1: np.array = sim_cfg_array(sim1, object_names)\n \n # Convert both arrays to heliocentric coordinates\n cfg0 = cfg0 - cfg0[0:1,:]\n cfg1 = cfg1 - cfg1[0:1,:]\n\n # Displacement of each body to earth\n earth_idx: int = object_names.index('Earth')\n q0: np.array = cfg0[:, 0:3] - cfg0[earth_idx, 0:3]\n q1: np.array = cfg1[:, 0:3] - cfg1[earth_idx, 0:3]\n \n # Right Ascension and Declination\n r0, asc0, dec0 = cart_to_sph(q0)\n r1, asc1, dec1 = cart_to_sph(q1)\n\n # Error in asc and dec; convert from radians to arcseconds\n asc_err: np.array = np.degrees(np.abs(asc1-asc0)) * 3600\n dec_err: np.array = np.degrees(np.abs(dec1-dec0)) * 3600\n\n # Take differences\n cfg_diff: np.array = (cfg1 - cfg0)\n pos_diff: np.array = cfg_diff[:, 0:3]\n vel_diff: np.array = cfg_diff[:, 3:6]\n\n # Error in position and velocity in heliocentric coordinates; skip the sun\n pos_err: np.array = np.linalg.norm(pos_diff, axis=1)\n pos_err_den: np.array = np.linalg.norm(cfg0[:, 0:3], axis=1)\n pos_err_den[0] = 1.0\n pos_err_rel: np.array = pos_err / pos_err_den\n vel_err: np.array = np.linalg.norm(vel_diff, axis=1)\n vel_err_den: np.array = np.linalg.norm(cfg0[:, 3:6], axis=1)\n vel_err_den[0] = 1.0\n vel_err_rel: np.array = vel_err / vel_err_den\n\n if verbose:\n print(f'\\nPosition difference - absolute & relative')\n print(f'(Angle errors in arcseconds, position in AU)')\n print(f'Body : Phi : Theta : Pos AU : Pos Rel : Vel Rel')\n object_names_short: List[str] = [nm.replace(' Barycenter', '') for nm in object_names]\n for i, nm in enumerate(object_names_short):\n print(f'{nm:10} : {asc_err[i]:5.2e}: {dec_err[i]:5.2e}: {pos_err[i]:5.2e}: '\n f'{pos_err_rel[i]:5.2e}: {vel_err_rel[i]:5.2e}')\n print(f'Overall : {rms(asc_err):5.2e}: 
{rms(dec_err):5.2e}: {rms(pos_err):5.2e}: '\n f'{rms(pos_err_rel):5.2e}: {rms(vel_err_rel):5.2e}')\n\n # Extract orbital element arrays from the two simulations\n elt0: np.array = sim_elt_array(sim0, object_names[1:])\n elt1: np.array = sim_elt_array(sim1, object_names[1:])\n\n # Take differences\n elt_diff: np.array = (elt1 - elt0)\n # Angle differences are mod two pi\n two_pi: float = 2.0 * np.pi\n elt_diff[:,2:] = (elt_diff[:,2:] +np.pi ) % two_pi - np.pi\n \n # Compute RMS difference by orbital element\n elt_rms: np.array = rms(elt_diff, axis=0)\n elt_err: np.array = np.abs(elt_diff)\n\n # Names of selected elements\n elt_names: List[str] = ['a', 'e', 'inc', 'Omega', 'omega', 'f', 'M', 'pomega', 'long']\n\n # Report RMS orbital element differences\n if verbose:\n print(f'\\nOrbital element errors:')\n print(f'elt : RMS : worst : max_err : HRZN : REB')\n for j, elt in enumerate(elt_names):\n idx = np.argmax(elt_err[:, j])\n worse = object_names_short[idx+1]\n print(f'{elt:6} : {elt_rms[j]:5.2e} : {worse:10} : {elt_err[idx, j]:5.2e} : '\n f'{elt0[idx, j]:11.8f} : {elt1[idx, j]:11.8f}')\n print(f'RMS (a, e, inc) = {rms(elt_diff[:,0:3]):5.2e}')\n print(f'RMS (f, M, pomega, long) = {rms(elt_diff[:,5:9]):5.2e}') \n\n # One summary error statistic\n ang_err: np.array = rms(np.array([asc_err, dec_err]))\n \n # Return the RMS position error and angle errors\n return pos_err, ang_err", "def kmer_distance(seq1,seq2,k=3):\n seq1_set = set(count_kmers(seq1,k).keys())\n seq2_set = set(count_kmers(seq2,k).keys())\n union_seq = seq1_set.union(seq2_set)\n dissimilarity = seq1_set ^ seq2_set\n distance = len(dissimilarity)/len(union_seq)\n print(dissimilarity)\n return distance", "def delta_e_76(lab1, lab2):\n\n l1, a1, b1 = lab1\n l2, a2, b2 = lab2\n return (l1 - l2) ** 2 + (a1 - a2) ** 2 + (b1 - b2) ** 2", "def aitken_delta2 ( xl , xl1 , xl2 , *others ) :\n \n dd = ( xl - xl1 ) - ( xl1 - xl2 )\n \n if not dd : return None\n\n return xl - ( xl - xl1 ) **2 / dd", "def hamming_distance(lhs,rhs):\n return len([(x,y) for x,y in zip(lhs,rhs) if x !=y])", "def angle_maps_slits(gamma, delta, ci, cj, w, h, SDD, pxl_size, ph, Rs):\n gamma_map = np.empty((h,w)) # initialize detector gamma map\n delta_map = np.empty((h,w)) # initialize detector delta map\n d = np.empty((h,w)) # initialize detector distance map\n corr_i = np.empty((h,w)) # initialize flat detector correction map\n g_offset = (-1.08435537e-6*gamma**2 - \n 0.00084077357*gamma - \n 0.0128920777) # gamma offset (calibration)\n gamma += g_offset # correct gamma position\n d_offset = (1.7280529238e-6*delta**2 - \n 0.000700361461*delta - \n 0.00367551936) # delta offset (calibration)\n delta += d_offset # correct delta position\n nom_gamma = np.deg2rad(gamma) # convert nominal det angles to [rad]\n nom_delta = np.deg2rad(delta) # convert nominal det angles to [rad]\n GAM = np.array([[np.cos(nom_gamma),np.sin(nom_gamma),0], \n [-np.sin(nom_gamma), np.cos(nom_gamma),0], \n [0,0,1]]) # \\Gamma rotational matrix\n DEL = np.array([[1,0,0], # \\Delta rotational matrix\n [0,np.cos(nom_delta),-np.sin(nom_delta)], \n [0,np.sin(nom_delta),np.cos(nom_delta)]])\n rot_mat = np.matmul(GAM,DEL) # multiply rotational matrices\n for j in range(h):\n dz = (cj - j)*pxl_size # delta z (z-distance from det. center)\n for i in range(w):\n dx = (ci - i)*pxl_size # delta x (x-distance from det. 
center)\n di = np.sqrt(dx**2 + (SDD + Rs)**2 + \n dz**2) # sample-to-pixel distance\n dr = np.sqrt(dx**2 + dz**2) # center-to-pixel distance\n s = np.array([0, Rs, 0]) # sample-to-slit vector\n (xs, ys, zs) = np.matmul(rot_mat, s) # rotate s vector\n p = np.array([dx, (SDD + Rs), dz]) # central pixel position at\n # zero angles in the lab coordinates\n (xp, yp, zp) = np.matmul(rot_mat, p) # central pixel position at\n # nominal detector angle\n dps = np.sqrt((xp - xs)**2 + (yp - ys)**2 + \n (zp - zs)**2) # pixel-to-slit distance\n gamma_map[j][i] = np.arctan((xp - xs)/(yp - ys)) # gamma map\n delta_map[j][i] = np.arcsin((zp - zs)/dps) # delta map\n d[j][i] = di # map of SDD distances\n corr_i[j][i] = 1/(np.cos(np.arctan(dr/SDD))) # flat det. corr.\n corr_d = np.power(d,2)/np.power(SDD,2) # flat det. corr.\n chi = np.arctan(np.tan(delta_map)/np.tan(gamma_map)) # map of chi\n Phor = (1 - \n np.power(np.sin(gamma_map),2)) # horizontal component of \n # polarization correction\n Pver = (1 - \n np.power(np.sin(delta_map)*np.cos(gamma_map),2)) # vertical comp.\n # of polarization correction\n P = ph*Phor + (1-ph)*Pver # polarization correction\n tth = np.arccos(np.cos(delta_map)*np.cos(gamma_map)) # 2th map\n L = 1/(np.sin(tth/2)*np.sin(tth)) # Lorentz correction\n flat = corr_i * corr_d # flat det. correction\n PL = P * L * flat # multiply corrrections\n return tth, chi, PL", "def optimal_angle_and_tilt(sensors_metadata_clean, latitude, sun_properties, Max_Isol, panel_properties):\n # calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.\n optimal_angle_flat = calc_optimal_angle(180, latitude, sun_properties.trr_mean) # assume surface azimuth = 180 (N,E), south facing\n sensors_metadata_clean['tilt']= np.vectorize(acos)(sensors_metadata_clean['Zdir']) #surface tilt angle in rad\n sensors_metadata_clean['tilt'] = np.vectorize(degrees)(sensors_metadata_clean['tilt']) #surface tilt angle in degrees\n sensors_metadata_clean['B'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean['tilt'],\n degrees(optimal_angle_flat)) # panel tilt angle in degrees\n\n # calculate spacing and surface azimuth of the panels for flat roofs\n module_length = panel_properties['module_length']\n optimal_spacing_flat = calc_optimal_spacing(sun_properties, optimal_angle_flat, module_length)\n sensors_metadata_clean['array_s'] = np.where(sensors_metadata_clean['tilt'] >= 5, 0, optimal_spacing_flat)\n sensors_metadata_clean['surface_azimuth'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],\n sensors_metadata_clean['Ydir'],\n sensors_metadata_clean['B']) # degrees\n\n # calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing\n if panel_properties['type'] == 'PV':\n module_width = module_length # for PV\n else:\n module_width = panel_properties['module_area']/module_length # for FP, ET\n module_flat_surface_area = module_width * (sensors_metadata_clean.array_s / 2 + module_length * cos(optimal_angle_flat))\n area_per_module = module_width * module_length\n\n # calculate the pv/solar collector module area within the area of each sensor point\n sensors_metadata_clean['area_installed_module'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean.AREA_m2,\n area_per_module * (sensors_metadata_clean.AREA_m2 / module_flat_surface_area))\n\n # categorize the sensors by surface_azimuth, B, GB\n result = np.vectorize(calc_categoriesroof)(sensors_metadata_clean.surface_azimuth, 
sensors_metadata_clean.B,\n sensors_metadata_clean.total_rad_Whm2, Max_Isol)\n sensors_metadata_clean['CATteta_z'] = result[0]\n sensors_metadata_clean['CATB'] = result[1]\n sensors_metadata_clean['CATGB'] = result[2]\n return sensors_metadata_clean", "def kspace_cholesky_solve_(self, other):\n n_points = np.max(np.array([n_lattice(self), n_lattice(other)]), axis = 0)\n self_k = transform(self, np.fft.fftn, n_points = n_points)\n other_k = transform(other, np.fft.fftn, n_points = n_points)\n\n ret = tmat()\n ret.load_nparray(np.ones((self_k.coords.shape[0],self_k.blockshape[0], other_k.blockshape[1]), dtype = np.complex), self_k.coords, safemode = False)\n #ret = self_k*1.0\n ret.blocks*=0.0\n\n #ret.blocks[:-1] = np.einsum(\"ijk,ikl->ijl\", self_k.blocks[:-1], other_k.blocks[:-1], optimize = True)\n\n for i in np.arange(len(self_k.blocks)-1):\n \n #print(np.max(np.abs(self_k.blocks[i].T-self_k.blocks[i])))\n #assert(np.max(np.abs(self_k.blocks[i].T-self_k.blocks[i]))<1e-10), \"not symmetric\"\n #assert(np.linalg.norm(self_k.blocks[i].T-self_k.blocks[i])<1e-10), \"not symmetric\"\n Mk = np.linalg.cholesky(self_k.blocks[i])\n yk = np.linalg.solve(Mk, other_k.blocks[i])\n\n\n ret.blocks[i] = np.linalg.solve(Mk.conj().T, yk)\n\n ret = transform(ret, np.fft.ifftn, n_points = n_points, complx = False)\n return ret", "def two_theta_hkl(self, H, K, L):\n return self.unit_cell.two_theta((H, K, L), self.wavelength, deg=True)", "def _kendall_tau_diff(self, a, b, i):\n # compute ordering relation of the single points a[i] and b[i]\n # with all previous points of the sequences a and b, respectively\n a_pos = np.zeros(a[:i].size, dtype=int)\n a_pos[a[:i] > a[i]] = 1\n a_pos[a[:i] < a[i]] = -1\n b_pos = np.zeros(b[:i].size, dtype=int)\n b_pos[b[:i] > b[i]] = 1\n b_pos[b[:i] < b[i]] = -1\n diff_pos = np.dot(a_pos, b_pos).astype(float)\n\n # compute ordering relation of the single points a[i] and b[i]\n # with all later points of the sequences\n a_neg = np.zeros(a[i:].size, dtype=int)\n a_neg[a[i:] > a[i]] = 1\n a_neg[a[i:] < a[i]] = -1\n b_neg = np.zeros(b[i:].size, dtype=int)\n b_neg[b[i:] > b[i]] = 1\n b_neg[b[i:] < b[i]] = -1\n diff_neg = np.dot(a_neg, b_neg)\n\n return diff_pos, diff_neg", "def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):\n\tfrom math import pi, sqrt, cos, acos\n\tangles = []\n\tif (method == 'P'):\n\t\ttemp = Util.even_angles(delta, theta1, theta2, phi1, phi2)\n\t\t#\t\t phi, theta, psi\n\t\tfor i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]);\n\telse: #elif (method == 'S'):\n\t\tDeltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0)\n\t\ts = delta*pi/180.0\n\t\tNFactor = 3.6/s\n\t\twedgeFactor = abs(Deltaz*(phi2-phi1)/720.0)\n\t\tNumPoints = int(NFactor*NFactor*wedgeFactor)\n\t\tangles.append([phi1, theta1, 0.0])\n\t\tz1 = cos(theta1*pi/180.0); \tphi=phi1 # initialize loop\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z1 + Deltaz*k/(NumPoints-1)\n\t\t\tr= sqrt(1-z*z)\n\t\t\tphi = phi1+(phi + delta/r -phi1)%(abs(phi2-phi1))\n\t\t\t#[k, phi,180*acos(z)/pi, 0]\n\t\t\tangles.append([phi, 180*acos(z)/pi, 0.0])\n\t\t#angles.append([p2,t2,0]) # This is incorrect, as the last angle is really the border, not the element we need. 
PAP 01/15/07\n\tif (phiEQpsi == 'Minus'):\n\t\tfor k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0\n\tif( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] )\n\n\treturn angles", "def test_two_qubit_weyl_decomposition_cnot(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, 0, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def CalculateCompassDifference(a, b):\n delta = NormalizeAngle(a - b)\n return delta", "def heading_difference(self, other_heading):\n diff = abs(self.heading - other_heading)\n if diff > 180:\n diff = 360 - diff\n return diff", "def addVectors(r1, r2):\n \"\"\" [0] = angle, [1] = lenght \"\"\"\n x = (math.sin(r1[0]) * r1[1]) + (math.sin(r2[0]) * r2[1])\n y = (math.cos(r1[0]) * r1[1]) + (math.cos(r2[0]) * r2[1])\n \n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n\n return (angle, length)", "def fringes_morlet_phase(m1,m2, quasi_pi=False):\n ### cross spectrum\n cross_spec = np.conj(m1.cwt)*m2.cwt\n phi = np.angle(cross_spec)\n if quasi_pi:\n phi = np.mod(phi + np.pi/2, 2*np.pi)\n weight = abs(m1.cwt)*abs(m2.cwt)\n phase = np.sum(phi*weight, axis=0)/np.sum(weight, axis=0)\n if quasi_pi:\n phase -= np.pi/2\n return phase", "def QuatAngleDiff(Wxyz1, Wxyz2):\n q1 = rigmech.QuatNormalize(rigmech.toQuat(Wxyz1))\n q2 = rigmech.QuatNormalize(rigmech.toQuat(Wxyz2))\n q3 = rigmech.QuatMult(q2, rigmech.QuatConj(q1))\n WxyzChange = q3[1:] * np.sign(q3[0])\n return WxyzChange", "def couple_so3(L1,L2,orbital=False):\n imax=int(2*min(L1,L2))\n if orbital==True:\n if (L1+L2)%1==0:\n L3set=[int(L1+L2-i) for i in range(0,imax+1)]\n else:\n print \"error message\"\n L3set=[]\n else:\n L3set=[L1+L2-i for i in range(0,imax+1)] \n return L3set", "def quaternion_difference(q1, q2):\n q1_abs = np.ndarray(4)\n q1_con = np.ndarray(4)\n q1_inv = np.ndarray(4)\n\n q1_con[0] = q1[0]\n q1_con[1] = -q1[1]\n q1_con[2] = -q1[2]\n q1_con[3] = -q1[3]\n\n functions.mju_mulQuat(q1_abs, q1, q1_con)\n q1_abs[0] += q1_abs[1] + q1_abs[2] + q1_abs[3]\n q1_inv = q1_con / q1_abs[0]\n\n q_diff = np.ndarray(4)\n functions.mju_mulQuat(q_diff, q2, q1_inv)\n\n return q_diff", "def ROT(hist1: np.ndarray, hist2: np.ndarray, *args):\n\n rot = ROTSolver(hist1, hist2)\n _, coupling = rot.solve(plot=False)\n\n return coupling", "def compute_subspace_angles(S1, S2):\n # Check the if the input arrays are 1D or 2D\n if S1.ndim == 1:\n # mat1 = np.reshape(S1, (1,S1.size))\n mat1 = np.reshape(S1, (S1.size, 1))\n elif S1.ndim == 2:\n mat1 = S1\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n if S2.ndim == 1:\n # mat2 = np.reshape(S2, (1,S2.size))\n mat2 = np.reshape(S2, (S2.size, 1))\n elif S2.ndim == 2:\n mat2 = S2\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n\n\n # Do a QR Factorization of S1 and S2\n Q1, R1 = np.linalg.qr(mat1)\n # print('S1 = \\n', S1)\n # print('Q1 = \\n', Q1)\n Q2, R2 = np.linalg.qr(mat2)\n # print('S1 = \\n', S2)\n # print('Q2 = \\n', Q2)\n intmat = np.matmul(Q1.T, Q2)\n # print('intmat = \\n', intmat)\n Y, s, Z = np.linalg.svd(intmat)\n # print('Y = \\n', Y)\n # print('U = \\n', np.matmul(Q1, Y))\n # print('V = \\n', np.matmul(Q2, Y))\n # print('s = \\n', s)\n\n # NaN prevention check\n indices = np.where(s > 1) # Get the indices where the violation exisits\n for entry in indices: # Loop over these indices to fix the violation\n for i in entry:\n if s[i] - 1 < 1.e-13: # 
This violation limit is pulled out of thin air!\n s[i] = 1.0\n\n s_radians = np.arccos(s)\n\n return s_radians", "def compute_error_minimizing_rotation(Points1, Points2):\r\n #TODO: implement me\r\n\r\n H_1_1 = 0\r\n H_1_2 = 0\r\n H_2_1 = 0\r\n H_2_2 = 0\r\n\r\n for t in range(1, len(Points1)):\r\n H_1_1 = H_1_1 + (Points1[t][0] * Points2[t][0])\r\n H_1_2 = H_1_2 + (Points1[t][1] * Points2[t][0])\r\n H_2_1 = H_2_1 + (Points1[t][0] * Points2[t][1])\r\n H_2_2 = H_2_2 + (Points1[t][1] * Points2[t][1])\r\n\r\n H = [[H_1_1,H_1_2],[H_2_1,H_2_2]]\r\n\r\n U, S, V = numpy.linalg.svd(H)\r\n\r\n V = numpy.transpose(V)\r\n\r\n R_1_1 = (U[0][0] * V[0][0]) +((U[0][1] * V[1][0]))\r\n R_1_2 = (U[0][0] * V[0][1]) +((U[0][1] * V[1][1]))\r\n R_2_1 = (U[1][0] * V[0][0]) +((U[1][1] * V[1][0]))\r\n R_2_2 = (U[1][0] * V[0][1]) +((U[1][1] * V[1][1]))\r\n\r\n R = [[R_1_1,R_1_2],[R_2_1,R_2_2]]\r\n\r\n return R", "def get_z(theta, phi):\n return math.cos(phi)/math.tan(theta/2) + 1j*math.sin(phi)/math.tan(theta/2)", "def differ_by1(self,sd1,sd2):\n if not isinstance(sd1,SlaterDeterminant):\n raise TypeError(\"Parameters sd1 must be SlaterDeterminant instance.\")\n if not isinstance(sd2,SlaterDeterminant):\n raise TypeError(\"Parameters sd2 must be SlaterDeterminant instance.\")\n O1 = 0.0\n O2 = 0.0\n\n sign = sd1.phase(sd2)\n occ_indices = copy.deepcopy(sd1.occ_indices)\n a_list = sd1.annihilation_list(sd2)\n c_list = sd1.creation_list(sd2)\n\n for i in a_list:\n if i in occ_indices:\n occ_indices.remove(i)\n\n for j in c_list:\n if j in occ_indices:\n occ_indices.remove(j)\n O1 += self.h1e[a_list[0],c_list[0]]\n for k in occ_indices:\n O2 += self.g2e[a_list[0],k,c_list[0],k]\n value = O1+O2\n value *=sign\n return value", "def get_ky_and_hyp_pack(name, s1, e1, s2, e2, same: bool,\n hyps: np.ndarray, kernel_grad, cutoffs=None, hyps_mask=None):\n\n # assume sigma_n is the final hyperparameter\n sigma_n, non_noise_hyps, _ = obtain_noise_len(hyps, hyps_mask)\n\n # initialize matrices\n size1 = (e1-s1) * 3\n size2 = (e2-s2) * 3\n k_mat = np.zeros([size1, size2])\n hyp_mat = np.zeros([non_noise_hyps, size1, size2])\n\n args = from_mask_to_args(hyps, hyps_mask, cutoffs)\n\n ds = [1, 2, 3]\n\n training_data = _global_training_data[name]\n # calculate elements\n for m_index in range(size1):\n x_1 = training_data[int(math.floor(m_index / 3))+s1]\n d_1 = ds[m_index % 3]\n\n if (same):\n lowbound = m_index\n else:\n lowbound = 0\n for n_index in range(lowbound, size2):\n x_2 = training_data[int(math.floor(n_index / 3))+s2]\n d_2 = ds[n_index % 3]\n\n # calculate kernel and gradient\n cov = kernel_grad(x_1, x_2, d_1, d_2, *args)\n\n # store kernel value\n k_mat[m_index, n_index] = cov[0]\n grad = from_grad_to_mask(cov[1], hyps_mask)\n hyp_mat[:, m_index, n_index] = grad\n if (same):\n k_mat[n_index, m_index] = cov[0]\n hyp_mat[:, n_index, m_index] = grad\n\n return hyp_mat, k_mat", "def calc_difference(ndvi_tile1, ndvi_tile2, output):\n \n #open dataset and get Affine transformation and bounding properties \n with rio.open(ndvi1) as src1:\n meta = src1.meta.copy()\n transform = src1.meta[\"transform\"]\n x = meta['width']\n y = meta['height']\n band1 = src1.read()\n \n #open dataset \n with rio.open(ndvi2) as src2:\n #read the band as ndarray with the same dimension of src1\n band2 = src2.read(out_shape=(src1.height, src1.width), \n resampling=rio.enums.Resampling.bilinear)\n #create destination for reprojection of src2\n dst_crs = {'init': 'EPSG:32632'}\n proj_band2 = np.empty(src1.shape, dtype=np.float32)\n #reproject the 
src2 to match src1\n warp.reproject(band2, destination=proj_band2, src_transform=src2.transform, src_crs=src2.crs, \n dst_transform=transform, dst_crs=dst_crs) \n \n #calculate difference between reprojected band2 and band1\n difference = np.subtract(proj_band2, band1)\n #create outfile\n outfile = output\n #write outfile with the properties and resolution of src1\n with rio.open(outfile, 'w', **meta) as dst:\n dst.write(difference, window=rio.windows.Window(col_off=0, row_off=0, width=x, height=y))\n\n return outfile", "def func(X, H1 = anglesAsNpArray):\r\n\r\n #motorAngles = inv_kinematic(X[0], X[1], X[2], X[3], X[4], X[5])\r\n motorAngles = inv_kinematic(X)\r\n H2 = np.array(motorAngles)\r\n\r\n difference = H1 - H2 # calculate the difference between calulated and real angles\r\n\r\n return difference", "def match_cat1_to_cat2(ra1, dec1, ra2, dec2): \n cat1 = SkyCoord(ra=ra1*u.degree, dec=dec1*u.degree) \n cat2 = SkyCoord(ra=ra2*u.degree, dec=dec2*u.degree) \n idx, d2d, d3d = cat1.match_to_catalog_sky(cat2)\n \n return idx, d2d.degree", "def get_angle_contrib(self, groupBy='m'):\n ias3 = []\n types3_z = []\n types3_m = []\n\n e3 = 0.0\n es3 = []\n for aj in self.m.GetAtoms():\n j = aj.GetIdx()\n zj = self.zs[j]\n neibs = aj.GetNeighbors()\n nneib = len(neibs)\n if zj > 1 and nneib > 1:\n for i0 in range(nneib):\n for k0 in range(i0+1,nneib):\n eijk = 0.0\n i, k = neibs[i0].GetIdx(), neibs[k0].GetIdx()\n ias = [i,j,k]\n ias3.append(ias)\n\n ap1, ap2, ap3 = [ self.atypes[ia] for ia in ias ]\n if ap1 > ap3:\n tv = ap1; ap1 = ap3; ap3 = tv # tv: temperay value\n types3_m.append( '-'.join( [ap1, ap2, ap3] ) )\n\n z1, z2, z3 = [ self.zs[ia] for ia in ias ]\n if z1 > z3:\n tv = z1; z1 = z3; z3 = tv\n types3_z.append( '-'.join(['%d'%zi for zi in [z1,z2,z3] ]) )\n\n theta = rdMolTransforms.GetAngleRad(self.m.GetConformer(), i, j, k)\n cosT = np.cos(theta)\n ka, theta0 = rcr.GetUFFAngleBendParams(self.m, i, j, k)\n theta0 = theta0*np.pi/180.0\n cosT0 = np.cos(theta0); sinT0 = np.sin(theta0)\n\n #print ' -- atypes = ', self.atypes\n hybj = self.hybs[j]\n if hybj == 'SP':\n eijk = ka*(1.0 + np.cos(theta))\n elif hybj == 'SP2':\n # energy expression from Openbabel's src file \"forcefielduff.cpp',\n # different from that of Rappe's bad formula,\n eijk = (ka/4.5)*(1.0 + (1.0 + cosT)*(4.0*cosT))\n elif hybj == 'SP3':\n c2 = 1.0 / (4.0 * sinT0 * sinT0)\n c1 = -4.0 * c2 * cosT0;\n c0 = c2*(2.0*cosT0*cosT0 + 1.0);\n eijk = ka*(c0 + c1*cosT + c2*(2.0*cosT*cosT - 1.0))\n else:\n print('not supported atomic type: %s'%apj)\n assert 0\n\n e3 += eijk\n es3.append(eijk)\n self.e3 = e3\n self.es3 = es3\n self.n3 = len(es3)\n self.types3 = {'m':types3_m, 'n':types3_z}[groupBy]\n #return e3, n3, types3, es3", "def wasserstein_diagram_distance(\n pts0: np.ndarray, \n pts1: np.ndarray, \n y_axis: AnyStr = \"death\", \n p: Union[int, float] = 1, \n internal_q: int = 2\n) -> float:\n if y_axis == \"lifetime\": # Non functional for now!\n lifetimes0 = pts0[:, 1]\n lifetimes1 = pts1[:, 1]\n elif y_axis == \"death\":\n lifetimes0 = pts0[:,1] - pts0[:,0]\n lifetimes1 = pts1[:,1] - pts1[:,0]\n else:\n raise ValueError(\"y_axis must be 'death' or 'lifetime'\")\n\n\n if np.isfinite(internal_q):\n if internal_q == 1:\n extra_dist0 = lifetimes0\n extra_dist1 = lifetimes1\n pairwise_dist = pairwise_distances(pts0, pts1, metric=\"l1\")\n elif internal_q == 2:\n extra_dist0 = lifetimes0 / SQRT_2\n extra_dist1 = lifetimes1 / SQRT_2\n pairwise_dist = pairwise_distances(pts0, pts1, metric=\"l2\")\n else:\n extra_dist0 = lifetimes0 * 
(2 **(1/internal_q - 1))\n extra_dist1 = lifetimes1 * (2 **(1/internal_q - 1))\n pairwise_dist = pairwise_distances(pts0, pts1, metric=\"minkowski\", p=internal_q)\n else:\n extra_dist0 = (pts0[:,1]-pts0[:,0])/2\n extra_dist1 = (pts1[:,1]-pts1[:,0])/2\n pairwise_dist = pairwise_distances(pts0, pts1, metric=\"chebyshev\")\n\n rows_with_zeros = np.any(pairwise_dist == 0, axis=1)\n cols_with_zeros = np.any(pairwise_dist == 0, axis=0)\n\n if np.sum(rows_with_zeros) == pts0.shape[0] and np.sum(cols_with_zeros) == pts1.shape[0]:\n return 0.0\n\n pairwise_dist = pairwise_dist[~rows_with_zeros, :][:, ~cols_with_zeros]\n extra_dist0 = extra_dist0[~rows_with_zeros]\n extra_dist1 = extra_dist1[~cols_with_zeros]\n\n all_pairs_ground_distance_a = np.hstack([pairwise_dist, extra_dist0[:, np.newaxis]])\n extra_row = np.zeros(all_pairs_ground_distance_a.shape[1])\n extra_row[: pairwise_dist.shape[1]] = extra_dist1\n all_pairs_ground_distance_a = np.ascontiguousarray(np.vstack([all_pairs_ground_distance_a, extra_row]))\n\n if p != 1:\n all_pairs_ground_distance_a = all_pairs_ground_distance_a ** p\n\n n0 = pairwise_dist.shape[0]\n n1 = pairwise_dist.shape[1]\n a = np.ones(n0 + 1)\n a[n0] = n1\n a /= a.sum()\n b = np.ones(n1 + 1)\n b[n1] = n0\n b /= b.sum()\n\n base_dist = (n0 + n1) * ot.emd2(a, b, all_pairs_ground_distance_a, processes=1, numItermax=200000)\n\n if p != 1:\n return np.power(base_dist, 1.0 / p)\n else:\n return base_dist", "def differ_by2(self,sd1,sd2):\n if not isinstance(sd1,SlaterDeterminant):\n raise TypeError(\"Parameters sd1 must be SlaterDeterminant instance.\")\n if not isinstance(sd2,SlaterDeterminant):\n raise TypeError(\"Parameters sd2 must be SlaterDeterminant instance.\")\n value = 0.0\n\n sign = sd1.phase(sd2)\n a_list = sd1.annihilation_list(sd2)\n c_list = sd1.creation_list(sd2)\n value += self.g2e[a_list[0],a_list[1],c_list[0],c_list[1]]\n value *=sign\n return value", "def test_delta_in_diff(self):\n xk = 1 * self.ureg.kelvin\n yk = 2 * self.ureg.kelvin\n yf = yk.to('degF')\n yc = yk.to('degC')\n self.assertEqual(yk - xk, 1 * self.ureg.kelvin)\n self.assertEqual(yf - xk, 1 * self.ureg.kelvin)\n self.assertEqual(yc - xk, 1 * self.ureg.kelvin)", "def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])", "def _calc_longitudes(self):\n assert self.tracers\n\n # zp is z unit vector at all times, shape 501 x 3\n zp = np.cross(self.p0_positions, self.p0_velocities)\n zp = zp / np.linalg.norm(zp, axis=-1)[:, None]\n\n # xp and yp are x and y unit vectors\n xp = self.p0_positions\n xp = xp / np.linalg.norm(xp, axis=-1)[:, None]\n yp = np.cross(zp, xp)\n\n # project particle positions into orbital x-y plane\n p1_xp = np.sum(self.p1_positions * xp[:, None, :], axis=-1)\n p1_yp = np.sum(self.p1_positions * yp[:, None, :], axis=-1)\n p2_xp = np.sum(self.p2_positions * xp[:, None, :], axis=-1)\n p2_yp = np.sum(self.p2_positions * yp[:, None, :], axis=-1)\n\n # get longitudes\n p1_phi = np.arctan2(p1_yp, p1_xp)\n p2_phi = np.arctan2(p2_yp, p2_xp)\n\n # add/subtract multiples of 2pi for particles on higher wraps.\n dp = np.vstack((np.zeros((1, self.N1)), np.diff(p1_phi, axis=0)))\n for j in range(self.N1):\n changes = np.where(np.abs(dp[:, j]) > 1.1 * pi)[0]\n for i in range(changes.size):\n p1_phi[changes[i]:, j] -= 2 * pi * np.sign(dp[changes[i], j])\n dp = np.vstack((np.zeros((1, self.N2)), np.diff(p2_phi, axis=0)))\n for j in range(self.N2):\n changes = np.where(np.abs(dp[:, j]) > 1.1 * pi)[0]\n for i in range(changes.size):\n p2_phi[changes[i]:, j] -= 2 * pi * 
np.sign(dp[changes[i], j])\n\n return p1_phi, p2_phi", "def keep_till_central_abundance_He_C(bh, h1, h2, Ystop=1.0e-5, XCstop=1.0):\n if (bh is None) or (h1 is None) or (h2 is None):\n #at least one histroy is missing\n return bh, h1, h2, ''\n elif (not (\"age\" in bh.dtype.names)):\n #at least one histroy doesn't contain an age column\n return bh, h1, h2, ''\n \n h1_colnames = h1.dtype.names\n if (\"center_he4\" in h1_colnames) and (\"center_c12\" in h1_colnames):\n if (len(h1[\"center_he4\"])>0) and (len(h1[\"center_c12\"])>0):\n depleted1 = ((h1[\"center_he4\"][-1]<Ystop) and (h1[\"center_c12\"][-1]<XCstop))\n else:\n depleted1 = False\n else:\n depleted1 = False\n h2_colnames = h2.dtype.names\n if (\"center_he4\" in h2_colnames) and (\"center_c12\" in h2_colnames):\n if (len(h2[\"center_he4\"])>0) and (len(h2[\"center_c12\"])>0):\n depleted2 = ((h2[\"center_he4\"][-1]<Ystop) and (h2[\"center_c12\"][-1]<XCstop))\n else:\n depleted2 = False\n else:\n depleted2 = False\n\n if (not depleted1) and (not depleted2):\n #none of the stars reached He depletion\n return bh, h1, h2, ''\n \n if depleted1:\n# where_conditions_met1 = np.where((h1[\"center_he4\"]<Ystop) and (h1[\"center_c12\"]<XCstop))[0]\n where_conditions_met1 = []\n for i in range(len(h1[\"center_he4\"])):\n if (h1[\"center_he4\"][i]<Ystop) and (h1[\"center_c12\"][i]<XCstop):\n where_conditions_met1 += [i]\n if len(where_conditions_met1) == 0:\n warnings.warn(\"No He depletion found in h1, while expected.\")\n return bh, h1, h2, ''\n last_index = where_conditions_met1[0]\n newTF1 = 'Primary got stopped before central carbon depletion'\n if depleted2:\n# where_conditions_met2 = np.where((h2[\"center_he4\"]<Ystop) and (h2[\"center_c12\"]<XCstop))[0]\n where_conditions_met2 = []\n for i in range(len(h2[\"center_he4\"])):\n if (h2[\"center_he4\"][i]<Ystop) and (h2[\"center_c12\"][i]<XCstop):\n where_conditions_met2 += [i]\n if len(where_conditions_met2) == 0:\n warnings.warn(\"No He depletion found in h2, while expected.\")\n return bh, h1, h2, ''\n if depleted1:\n #both stars went beyond He depletion\n last_index2 = where_conditions_met2[0]\n if (\"star_age\" in h1.dtype.names):\n age_He_depletion1 = h1[\"star_age\"][last_index]\n else:\n age_He_depletion1 = bh[\"age\"][last_index]\n if (\"star_age\" in h1.dtype.names):\n age_He_depletion2 = h2[\"star_age\"][last_index2]\n else:\n age_He_depletion2 = bh[\"age\"][last_index2]\n if age_He_depletion1>age_He_depletion2:\n #take star which reached He depletion first\n last_index = last_index2\n newTF1 = 'Secondary got stopped before central carbon depletion'\n else:\n last_index = where_conditions_met2[0]\n newTF1 = 'Secondary got stopped before central carbon depletion'\n \n # include the point above the stopping criteria so that the last inferred\n # stellar state will be XXX_Central_He_depleted. This is essential to find\n # the core mass at He depletion with the calculate_Patton20_values_at_He_depl\n # method used to calculate the core collapse properties with the \n # Patton&Sukhbold mechanism.\n if len(bh) >= last_index+2:\n last_index += 1\n \n new_bh = bh[:last_index]\n new_h1 = h1[:last_index]\n new_h2 = h2[:last_index]\n\n return new_bh, new_h1, new_h2, newTF1", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)" ]
[ "0.614044", "0.5826733", "0.5789848", "0.5688521", "0.5658761", "0.56020504", "0.5570326", "0.5457091", "0.54170704", "0.54091537", "0.5399414", "0.5383706", "0.53525215", "0.5337892", "0.53332806", "0.5322254", "0.5320831", "0.53138745", "0.53113425", "0.5300493", "0.52915627", "0.5289074", "0.5280364", "0.52695245", "0.52540374", "0.5209026", "0.51963913", "0.51924527", "0.5187387", "0.51811934", "0.5169834", "0.5154681", "0.5146329", "0.5146258", "0.5145088", "0.51377165", "0.51258045", "0.51227796", "0.51103455", "0.510941", "0.50603163", "0.5055717", "0.504971", "0.50486773", "0.50455165", "0.50392914", "0.5038417", "0.5032141", "0.50304925", "0.50282234", "0.5016163", "0.50160056", "0.50021434", "0.49889874", "0.49885195", "0.49879804", "0.49875438", "0.4985622", "0.49850997", "0.4982784", "0.49784234", "0.49717626", "0.4968535", "0.49644402", "0.49596566", "0.4955088", "0.49518782", "0.49513543", "0.49476796", "0.4946491", "0.49441737", "0.49396512", "0.4938539", "0.49337056", "0.49318725", "0.492922", "0.49290156", "0.49214342", "0.49208966", "0.49121633", "0.49065855", "0.48997816", "0.48963603", "0.48874265", "0.48870605", "0.4884107", "0.48837855", "0.48834318", "0.4883173", "0.48791394", "0.48773843", "0.48769677", "0.48756242", "0.48740155", "0.48719922", "0.48717377", "0.48712286", "0.48705846", "0.4861925", "0.48600352" ]
0.5451437
8
Find the overall 3D rotation (phi, theta, psi) between two sets of Eulerian angles. The two sets have to be of the same length, and it is assumed that the k'th element of the first list corresponds to the k'th element of the second list.
def rotation_between_anglesets(agls1, agls2): from math import sin, cos, pi, sqrt, atan2, acos, atan from numpy import array, linalg, matrix import types deg2rad = pi/180.0 def ori2xyz(ori): if(type(ori) == types.ListType): phi, theta, psi = ori[:3] else: # it has to be Transformation object d = ori.get_params("spider") phi = d["phi"] theta = d["theta"] psi = d["psi"] """ # This makes no sense here! PAP 09/2011 if theta > 90.0: phi += 180.0 theta = 180.0-theta """ phi *= deg2rad theta *= deg2rad x = sin(theta) * sin(phi) y = sin(theta) * cos(phi) z = cos(theta) return [x, y, z] N = len(agls1) if N != len(agls2): print 'Both lists must have the same length' return -1 if N < 2: print 'At least two orientations are required in each list' return -1 U1, U2 = [], [] for n in xrange(N): p1 = ori2xyz(agls1[n]) p2 = ori2xyz(agls2[n]) U1.append(p1) U2.append(p2) # compute all Suv with uv = {xx, xy, xz, yx, ..., zz} Suv = [0] * 9 c = 0 nbori = len(U1) for i in xrange(3): for j in xrange(3): for s in xrange(nbori): Suv[c] += (U2[s][i] * U1[s][j]) c += 1 # create matrix N N = array([[Suv[0]+Suv[4]+Suv[8], Suv[5]-Suv[7], Suv[6]-Suv[2], Suv[1]-Suv[3]], [Suv[5]-Suv[7], Suv[0]-Suv[4]-Suv[8], Suv[1]+Suv[3], Suv[6]+Suv[2]], [Suv[6]-Suv[2], Suv[1]+Suv[3], -Suv[0]+Suv[4]-Suv[8], Suv[5]+Suv[7]], [Suv[1]-Suv[3], Suv[6]+Suv[2], Suv[5]+Suv[7], -Suv[0]-Suv[4]+Suv[8]]]) # eigenvector corresponding to the most positive eigenvalue val, vec = linalg.eig(N) q0, qx, qy, qz = vec[:, val.argmax()] # create quaternion Rot matrix r = [q0*q0-qx*qx+qy*qy-qz*qz, 2*(qy*qx+q0*qz), 2*(qy*qz-q0*qx), 0.0, 2*(qx*qy-q0*qz), q0*q0+qx*qx-qy*qy-qz*qz, 2*(qx*qz+q0*qy), 0.0, 2*(qz*qy+q0*qx), 2*(qz*qx-q0*qy), q0*q0-qx*qx-qy*qy+qz*qz, 0.0] R = Transform(r) dictR = R.get_rotation('SPIDER') return dictR['phi'], dictR['theta'], dictR['psi']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def common_line_in3D(phiA,thetaA,phiB,thetaB):\n\n\tfrom math import pi, sqrt, cos, sin, asin, atan2\n\n\tpiOver=pi/180.0;\n\tph1 = phiA*piOver; \n\tth1 = thetaA*piOver; \n\tph2 = phiB*piOver; \n\tth2 = thetaB*piOver;\n\t\n \t#nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;\n\t#ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;\n\t#nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);\n\n\n\tnx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)\n\tny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)\n\tnz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)\n\n\tnorm = nx*nx + ny*ny + nz*nz\n \n\tif norm < 1e-5:\n\t\t#print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB\n\t\treturn 0.0, 0.0\n\n\tif nz<0: nx=-nx; ny=-ny; nz=-nz;\n\n\t#thetaCom = asin(nz/sqrt(norm))\n\tphiCom = asin(nz/sqrt(norm))\n\t#phiCom = atan2(ny,nx)\n\tthetaCom = atan2(ny, nx)\n\t\n\treturn phiCom*180.0/pi , thetaCom*180.0/pi", "def find_rotation(a, b):\n if not np:\n raise PysimmError('pysimm.calc.find_rotation function requires numpy')\n a = np.array(a)\n b = np.array(b)\n\n a_x_b = np.cross(a, b)\n axis = a_x_b / np.linalg.norm(a_x_b)\n theta = acos(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n\n skew = np.matrix([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]])\n\n rot_matrix = np.identity(3) + sin(theta) * skew + (1 - cos(theta)) * skew * skew\n return rot_matrix", "def rotation_elements(self, eta, phi, theta):\n \n # Three-axis rotation:\n # 1. Rotate about +z by eta (follows RHR; rotation is mathematical and thus counter-clockwise)\n # 2. Tilt by phi with respect to +z (rotation about y-axis) then\n # 3. 
rotate by theta in-place (rotation about z-axis) ### BUG: This isn't a conceptual rotation about z (influenced by other rotations)\n \n\n eta = radians( eta ) # eta is orientation around the z axis (before reorientation)\n phi = radians( phi ) # phi is grain tilt (with respect to +z axis)\n theta = radians( theta ) # grain orientation (around the z axis)\n \n rotation_elements = [[ cos(eta)*cos(phi)*cos(theta)-sin(eta)*sin(theta) ,\n -cos(eta)*cos(phi)*sin(theta)-sin(eta)*cos(theta) ,\n -cos(eta)*sin(phi) ],\n [ sin(eta)*cos(phi)*cos(theta)+cos(eta)*sin(theta) ,\n -sin(eta)*cos(phi)*sin(theta)+cos(eta)*cos(theta) ,\n sin(eta)*sin(phi) ],\n [ -sin(phi)*cos(theta) ,\n sin(phi)*sin(theta) ,\n cos(phi) ]]\n \n return rotation_elements", "def euler_timestep_rotation(sphere_positions, sphere_rotations, new_sphere_positions, new_sphere_rotations, Oa_out, timestep):\r\n\r\n for i in range(sphere_positions.shape[0]):\r\n R0 = sphere_positions[i]\r\n O = (Oa_out[i][0] ** 2 + Oa_out[i][1] ** 2 + Oa_out[i][2] ** 2) ** 0.5\r\n\r\n ''' To rotate from basis (x,y,z) to (X,Y,Z), where x,y,z,X,Y,Z are unit vectors,\r\n you just need to multiply by the matrix\r\n ( X_x Y_x Z_x )\r\n ( X_y Y_y Z_y ),\r\n ( X_z Y_z Z_z )\r\n where X_x means the x-component of X.\r\n Our Z is Omega = o_spheres[i], so we need to make it into a complete basis.\r\n To do that we pick a unit vector different to Omega (either zhat or xhat depending on Omega)\r\n and use (Omega x zhat, Omega x (Omega x zhat), zhat) as our basis (X,Y,Z).\r\n That's it! [Only took me three days...]\r\n '''\r\n\r\n if np.array_equal(Oa_out[i], [0, 0, 0]):\r\n rot_matrix = np.identity(3)\r\n else:\r\n Otest = (abs(Oa_out[i] / O)).astype('float')\r\n perp1 = [0, 0, 1] if np.allclose(Otest, [1, 0, 0]) else [1, 0, 0]\r\n rot_matrix = np.array([np.cross(Oa_out[i], perp1) / O, np.cross(Oa_out[i], np.cross(Oa_out[i], perp1)) / O ** 2, Oa_out[i] / O]).transpose()\r\n\r\n for j in range(2):\r\n ''' rb0 is the position (\"r\") of the endpoint of the pointy rotation vector in the\r\n external (x,y,z) frame (\"b\") at the beginning of this process (\"0\") '''\r\n rb0 = sphere_rotations[i, j]\r\n\r\n ''' rbdashdash0_xyz is the position of the same endpoint in the frame of the rotating sphere (\"b''\"),\r\n\t\t\t\t\t\twhich we set to have the z-axis=Omega axis. It's in Cartesian coordinates. 
'''\r\n rbdashdash0_xyz = np.dot(linalg.inv(rot_matrix), (rb0 - R0))\r\n x0 = rbdashdash0_xyz[0]\r\n y0 = rbdashdash0_xyz[1]\r\n z0 = rbdashdash0_xyz[2]\r\n\r\n r0 = (x0 ** 2 + y0 ** 2 + z0 ** 2) ** 0.5\r\n t0 = np.arccos(z0 / r0)\r\n p0 = 0 if (x0 == 0 and y0 == 0) else np.arctan2(y0, x0)\r\n r = r0\r\n t = t0\r\n p = euler_timestep(p0, O, timestep)\r\n\r\n x = r * np.sin(t) * np.cos(p)\r\n y = r * np.sin(t) * np.sin(p)\r\n z = r * np.cos(t)\r\n rbdashdash_xyz = np.array([x, y, z])\r\n R = new_sphere_positions[i]\r\n rb = R + np.dot(rot_matrix, rbdashdash_xyz)\r\n new_sphere_rotations[i, j] = rb\r\n return new_sphere_rotations", "def check_angles(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.HarmonicAngleForce, \"Error: forces must be HarmonicAngleForces\"\n\n n_angles0 = force0.getNumAngles()\n n_angles1 = force1.getNumAngles()\n\n dict0, dict1 = {}, {}\n\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(0)\n #unit_theta = theta0.unit\n unit_theta = u.degrees\n #unit_k = k0.unit\n unit_k = u.kilojoules_per_mole/(u.degrees)**2\n\n for k in range(n_angles0):\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict0[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n for k in range(n_angles1):\n i0, i1, i2, theta0, k0 = force1.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict1[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Angles0 - Angles1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Angles1 - Angles0 = %s\" % (keys1.difference(keys0)))\n diff_keys = keys0.symmetric_difference(keys1)\n assert diff_keys == set(), \"Systems have different HarmonicAngleForce entries: extra keys are: \\n%s\" % diff_keys\n\n for k, parameter_name in enumerate([\"theta0\", \"k0\"]):\n for (i0, i1, i2) in dict0.keys():\n val0 = dict0[i0, i1, i2][k]\n val1 = dict1[i0, i1, i2][k]\n if parameter_name=='theta0':\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has angle values of %f and %f degrees, respectively.\" % (i0, i1, i2, val0, val1)\n else:\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has force constant values of %f and %f kJ/(mol degree**2), respectively.\" % (i0, i1, i2, val0, val1)", "def euler2quat(angles, rot_seq='zyx'):\n cangle = np.cos(0.5*angles)\n sangle = np.sin(0.5*angles)\n rot_seq = rot_seq.lower()\n if rot_seq == 'zyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'zyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'zxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + 
sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'zxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'yxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'yzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'xyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'xzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n else:\n return False", "def solve_r3_rotation_for_angles_given_axes(\n R, e1, e2, e3, smaller_phi2_solution=True, return_both_solutions=False, deg=False\n):\n\n assert R.is_r3_rotation_matrix()\n e1 = matrix.col(e1).normalize()\n e2 = matrix.col(e2).normalize()\n e3 = matrix.col(e3).normalize()\n # Fail if e2 & e3 are parallel\n e2xe3 = e2.cross(e3)\n if e2xe3.length_sq() < 1.0e-6:\n return None\n # Make a unit test vector\n u = e2xe3.normalize()\n e1e2 = e1.dot(e2)\n e1e3 = e1.dot(e3)\n e2e3 = e2.dot(e3)\n e1e2e3 = e1.dot(e2.cross(e3))\n Re3 = R * e3\n e1Re3 = e1.dot(Re3)\n # ** Step 1 ** Calculation of phi2 (Bricogne equation (4))\n # e1.(R e3) = (e1.e2)(e2.e3) + {(e1.e3) - (e1.e2)(e2.e3)} cos(phi2)\n # + (e1.e2 x e3) sin(phi2)\n # The coefficients of cos & sin phi2\n cc = e1e3 - 
e1e2 * e2e3\n ss = e1e2e3\n # Fail if both are zero (indeterminate)\n if abs(cc) < 1.0e-6 and abs(ss) < 1.0e-6:\n return None\n norm = math.sqrt(cc * cc + ss * ss)\n rhs = (e1Re3 - e1e2 * e2e3) / norm\n # abs(rhs) should not be greater than 1.0, allowing a small tolerance\n if abs(rhs) > 1.000002:\n return None\n if rhs > 1.0:\n rhs = 1.0\n elif rhs < -1.0:\n rhs = -1.0\n cc /= norm\n ss /= norm\n # Solve rhs = cos(phi2) * cc + sin(phi2) * ss\n # using cos(a-b) = cos(a) cos(b) + sin(a) sin(b)\n # where b = phi2\n a = math.atan2(ss, cc)\n amb = math.acos(rhs)\n # Two solutions in range -pi to +pi\n # Note that if e1 == e3, ss = 0, a = 0 & phi2b = -phi2a\n phi2a = a - amb\n if phi2a > math.pi:\n phi2a -= 2.0 * math.pi\n elif phi2a < -math.pi:\n phi2a += 2.0 * math.pi\n phi2b = a + amb\n if phi2b > math.pi:\n phi2b -= 2.0 * math.pi\n elif phi2b < -math.pi:\n phi2b += 2.0 * math.pi\n if return_both_solutions:\n phi2_ = (phi2a, phi2b)\n elif smaller_phi2_solution:\n if abs(phi2a) < abs(phi2b):\n phi2_ = (phi2a,)\n else:\n phi2_ = (phi2b,)\n else:\n if abs(phi2a) > abs(phi2b):\n phi2_ = (phi2a,)\n else:\n phi2_ = (phi2b,)\n solutions = []\n for phi2 in phi2_:\n # ** Step 2 ** Calculation of phi1\n R2 = e2.axis_and_angle_as_r3_rotation_matrix(phi2, deg=False)\n R2inv = R2.transpose()\n v = R2 * e3\n w = Re3\n v1 = v - (v.dot(e1)) * e1\n w1 = w - (w.dot(e1)) * e1\n norm = v1.dot(v1) * w1.dot(w1)\n # If norm = 0, rotations 1 & 3 are around same axis (for this phi2),\n # so any value for phi1 is OK\n if norm > 1.0e-8:\n norm = math.sqrt(norm)\n # cos(phi1) = (v1.w1)/norm\n # sin(phi1) = (v1.w1 x e1)/norm\n phi1 = math.atan2(v1.dot(w1.cross(e1)) / norm, v1.dot(w1) / norm)\n if phi1 > math.pi:\n phi1 -= 2.0 * math.pi\n if phi1 < -math.pi:\n phi1 += 2.0 * math.pi\n else:\n phi1 = 0.0\n # ** Step 3 ** Calculation of phi3\n R1inv = e1.axis_and_angle_as_r3_rotation_matrix(-1.0 * phi1, deg=False)\n R3 = R2inv * R1inv * R\n R3u = R3 * u\n # sin(phi3) = u.R3u x e3\n # cos(phi3) = u.R3u\n phi3 = math.atan2(u.dot(R3u.cross(e3)), u.dot(R3u))\n if deg:\n phi1, phi2, phi3 = tuple([x * 180 / math.pi for x in (phi1, phi2, phi3)])\n solutions.append((phi1, phi2, phi3))\n\n if return_both_solutions:\n return solutions\n else:\n return solutions[0]", "def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn", "def angle_hkls(self, h1, h2):\n h1v = norm_vec((vec(*h1).T * self.Bmat)).T\n h2v = norm_vec((vec(*h2).T * self.Bmat)).T\n return np.around(np.arccos(h1v.T*h2v)[0, 0] * degrees, 3)", "def get_phi_kappa_omega(self, angles):\n (phi) = angles[0]\n (kappa) = angles[1]\n (omega) = angles[2]\n return (phi, kappa, omega)", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def rotate(self, ra1, dec1, ra2, dec2, ra3, dec3):\n # Turns Right Ascension/Declination into Azimuth/Zenith for healpy\n phi1 = ra1 - np.pi\n zen1 = np.pi/2. - dec1\n phi2 = ra2 - np.pi\n zen2 = np.pi/2. - dec2\n phi3 = ra3 - np.pi\n zen3 = np.pi/2. 
- dec3\n\n # Rotate each ra1 and dec1 towards the pole?\n x = np.array([hp.rotator.rotateDirection(\n hp.rotator.get_rotation_matrix((dp, -dz, 0.))[0], z, p)\n for z, p, dz, dp in zip(zen1, phi1, zen2, phi2)])\n\n # Rotate **all** these vectors towards ra3, dec3 (source_path)\n zen, phi = hp.rotator.rotateDirection(np.dot(\n hp.rotator.get_rotation_matrix((-phi3, 0, 0))[0],\n hp.rotator.get_rotation_matrix((0, zen3, 0.))[0]), x[:, 0], x[:, 1])\n\n dec = np.pi/2. - zen\n ra = phi + np.pi\n return np.atleast_1d(ra), np.atleast_1d(dec)", "def rotation(x1, z1, x2, z2):\n e1 = np.zeros(shape=(3, 3))\n e2 = np.zeros(shape=(3, 3))\n e1[0, :] = x1 / np.linalg.norm(x1)\n e1[2, :] = z1 / np.linalg.norm(z1)\n e1[1, :] = np.cross(e1[2, :], e1[0, :])\n e2[0, :] = x2 / np.linalg.norm(x2)\n e2[2, :] = z2 / np.linalg.norm(z2)\n e2[1, :] = np.cross(e2[2, :], e2[0, :])\n R = np.zeros(shape=(3, 3))\n for i in range(3):\n for j in range(3):\n R[i, j] = np.dot(e1[i, :], e2[j, :])\n R = np.transpose(R)\n return R", "def angles_points(a, b, c):\n u = subtract_vectors(b, a)\n v = subtract_vectors(c, a)\n return angles_vectors(u, v)", "def test_d_3():\n rs = 20\n d = 3\n np.random.seed(rs)\n number_rotations = 3\n\n theta_1 = np.random.uniform(0, 2 * math.pi)\n rotation_1 = np.identity(d)\n pos_1 = np.random.randint(0, d - 1)\n pos_2 = np.random.randint(pos_1 + 1, d)\n rotation_1[pos_1, pos_1] = math.cos(theta_1)\n rotation_1[pos_1, pos_2] = - math.sin(theta_1)\n rotation_1[pos_2, pos_1] = math.sin(theta_1)\n rotation_1[pos_2, pos_2] = math.cos(theta_1)\n\n theta_2 = np.random.uniform(0, 2 * math.pi)\n rotation_2 = np.identity(d)\n pos_3 = np.random.randint(0, d - 1)\n pos_4 = np.random.randint(pos_3 + 1, d)\n rotation_2[pos_3, pos_3] = math.cos(theta_2)\n rotation_2[pos_3, pos_4] = - math.sin(theta_2)\n rotation_2[pos_4, pos_3] = math.sin(theta_2)\n rotation_2[pos_4, pos_4] = math.cos(theta_2)\n\n theta_3 = np.random.uniform(0, 2 * math.pi)\n rotation_3 = np.identity(d)\n pos_5 = np.random.randint(0, d - 1)\n pos_6 = np.random.randint(pos_5 + 1, d)\n rotation_3[pos_5, pos_5] = math.cos(theta_3)\n rotation_3[pos_5, pos_6] = - math.sin(theta_3)\n rotation_3[pos_6, pos_5] = math.sin(theta_3)\n rotation_3[pos_6, pos_6] = math.cos(theta_3)\n\n final_rotation = rotation_1 @ rotation_2 @ rotation_3\n np.random.seed(rs)\n rotation_function = (mt_obj.calculate_rotation_matrix\n (d, number_rotations))\n assert(np.all(final_rotation == rotation_function))", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def euler2rot3D(psi, theta, phi):\n Rphi = np.array([[np.cos(phi), np.sin(phi), 0],\n [-np.sin(phi), 
np.cos(phi), 0],\n [0, 0, 1]])\n Rtheta = np.array([[np.cos(theta), 0, -np.sin(theta)],\n [0, 1, 0],\n [np.sin(theta), 0, np.cos(theta)]])\n Rpsi = np.array([[np.cos(psi), np.sin(psi), 0],\n [-np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(Rpsi, np.dot(Rtheta, Rphi))", "def helical_consistency(p2i, p1):\n\tfrom pixel_error import angle_diff\n\tfrom math import cos,pi\n\tfrom utilities import getvec\n\tfrom pixel_error import angle_error\n\tfrom EMAN2 import Vec2f\n\tn =len(p1[0])\n\tprint n\n\tqtm = -1.0e10\n\tfor lf in xrange(0,181,180):\n\t\tp2 = []\n\t\tp2.extend(p2i)\n\t\tif( lf == 180):\n\t\t\ttflip = Transform({\"type\":\"spider\",\"theta\":180.0})\n\t\t\tfor j in xrange(n):\n\t\t\t\tt2 = Transform({\"type\":\"spider\",\"phi\":p2[0][j],\"theta\":p2[1][j],\"psi\":p2[2][j]})\n\t\t\t\tt2.set_trans( Vec2f( -p2[3][j], -p2[4][j] ) )\n\t\t\t\tt2 = t2*tflip\n\t\t\t\td = t2.get_params(\"spider\")\n\t\t\t\tp2[0][j] = d[\"phi\"]\n\t\t\t\tp2[1][j] = d[\"theta\"]\n\t\t\t\tp2[2][j] = d[\"psi\"]\n\t\t\t\tp2[3][j] = -d[\"tx\"]\n\t\t\t\tp2[4][j] = -d[\"ty\"]\n\t\ttt1 = [0.0]*n\n\t\ttt2 = [0.0]*n\n\t\tmirror = [False]*n\n\t\tln = 0\n\t\tfor j in xrange( n ):\n\t\t\tt1 = getvec(p1[0][j],p1[1][j])\n\t\t\tt2 = getvec(p2[0][j],p2[1][j])\n\t\t\ttm = getvec(180.0+p2[0][j],180.0-p2[1][j])\n\t\t\ttt1[j] = t1[0]*t2[0]+t1[1]*t2[1]+t1[2]*t2[2]\n\t\t\ttt2[j] = t1[0]*tm[0]+t1[1]*tm[1]+t1[2]*tm[2]\n\t\t\tif(abs(tt1[j])<1.0e-7): tt1[j] = 0.0\n\t\t\tif(abs(tt2[j])<1.0e-7): tt2[j] = 0.0\n\t\t\tif(tt1[j]>tt2[j]):\n\t\t\t\tmirror[j] = True\n\t\t\t\tln+=1\n\t\tprint \" FLIP \",lf\n\t\tif(ln < n//2):\n\t\t\tprint \"mirror \",ln\n\t\t\tfor j in xrange( n ):\n\t\t\t\tp2[0][j] += 180.0\n\t\t\t\tp2[1][j] = 180.0-p2[1][j]\n\t\t\t\tp2[2][j] = -p2[2][j]\n\t\t\t\tp2[4][j] = -p2[4][j]\n\t\t\t\tmirror[j] = not(mirror[j])\n\t\telse:\n\t\t\tprint \" straight\", ln\n\t\tphi1 = []\n\t\tphi2 = []\n\t\tagree = []\n\t\tfor j in xrange(n):\n\t\t\tif(mirror[j]):\n\t\t\t\tphi1.append(p1[0][j])\n\t\t\t\tphi2.append(p2[0][j])\n\t\t\t\tagree.append(j)\n\t\tprint len(phi1)\n\t\tdelta_phi = angle_diff( phi2, phi1 )\n\t\tprint \"close form diff===\", delta_phi\n\n\t\tphi1 = []\n\t\tphi2 = []\n\t\terrorm = []\n\t\tfor j in xrange( len( p1[0]) ):\n\t\t\tp2[0][j] = (p2[0][j] + delta_phi + 360)%360.0\n\t\t\tif(mirror[j]):\n\t\t\t\tphi1.append(p1[0][j])\n\t\t\t\tphi2.append(p2[0][j])\n\t\t\t\terrorm.append(angle_error( [ p2[0][j] ], [ p1[0][j] ]))\n\t\tqt = sum(errorm)/len(errorm)\n\t\tprint len(errorm),qt\n\t\tif(qt > qtm):\n\t\t\tqtm = qt\n\t\t\tp2o = []\n\t\t\tp2o.extend(p2)\n\t\t\terrormo = []\n\t\t\tphi1o = []\n\t\t\tphi2o = []\n\t\t\terrormo.extend(errorm)\n\t\t\tphi1o.extend(phi1)\n\t\t\tphi2o.extend(phi2)\n\t\t\n\treturn p2o, errormo, agree, delta_phi, phi1o, phi2o", "def rot_align(m, coeff, pairs):\n n_theta = 360\n p = pairs.shape[0]\n c = np.zeros((m + 1, p), dtype='complex128')\n m_list = np.arange(1, m + 1)\n\n max_iter = 100\n precision = 1e-10\n\n # Find initial points for Newton Raphson\n for i in range(m + 1):\n c[i] = np.einsum('ij, ij -> j', np.conj(coeff[i][:, pairs[:, 0]]), coeff[i][:, pairs[:, 1]])\n\n c2 = np.flipud(np.conj(c[1:]))\n b = (2 * m + 1) * np.real(common.icfft(np.concatenate((c2, c), axis=0)))\n rot = np.argmax(b, axis=0)\n rot = (rot - m) * n_theta / (2 * m + 1)\n\n # creating f' and f'' function\n m_list_ang_1j = 1j * m_list * np.pi / 180\n c_for_f_prime_1 = m_list_ang_1j * c[1:].T\n c_for_f_prime_2 = np.square(m_list_ang_1j) * c[1:].T\n\n def f_prime(x):\n return np.sum(np.real(c_for_f_prime_1 * 
np.exp(np.outer(x, m_list_ang_1j))), 1)\n\n def f_prime2(x):\n return np.sum(np.real(c_for_f_prime_2 * np.exp(np.outer(x, m_list_ang_1j))), 1)\n\n # Finding brackets, x1<x2 such that sign(f(x1)) != sign(f(x2)) and rot = (x1 + x2) / 2\n step_size = 0.5\n x1 = rot.copy()\n x2 = rot.copy()\n bad_indices = np.full(p, True)\n while np.any(bad_indices):\n x1[bad_indices] -= step_size\n x2[bad_indices] += step_size\n f_x1 = f_prime(x1)\n f_x2 = f_prime(x2)\n bad_indices = f_x1 * f_x2 > 0\n\n # Setting x1, x2 into x_low, x_high such that f(x_low)<f(x_high).\n x_low = x1.copy()\n x_high = x2.copy()\n f_x_low = f_prime(x_low)\n f_x_high = f_prime(x_high)\n x_high_is_low = f_x_high < f_x_low\n tmp = x_low.copy()\n tmp[x_high_is_low] = x_high[x_high_is_low]\n x_high[x_high_is_low] = x_low[x_high_is_low]\n x_low = tmp\n\n # Handling f(x) = 0 case\n f_x_low = f_prime(x_low)\n f_x_low_0 = f_x_low == 0\n x_high[f_x_low_0] = x_low[f_x_low_0]\n f_x_high = f_prime(x_high)\n f_x_high_0 = f_x_high == 0\n x_low[f_x_high_0] = x_high[f_x_high_0]\n\n rts = (x_low + x_high) / 2\n dx = np.abs(x_low - x_high)\n dx_old = dx.copy()\n f = f_prime(rts)\n df = f_prime2(rts)\n for _ in range(max_iter):\n bisect_indices = np.bitwise_or(((rts - x_high) * df - f) * ((rts - x_low) * df - f) > 0,\n np.abs(2 * f) > np.abs(dx_old * df))\n newton_indices = ~bisect_indices\n dx_old = dx.copy()\n\n # Handling out of range indices with Bisect step\n dx[bisect_indices] = (x_high[bisect_indices] - x_low[bisect_indices]) / 2\n rts[bisect_indices] = x_low[bisect_indices] + dx[bisect_indices]\n\n # Handling the rest with newton step\n dx[newton_indices] = f[newton_indices] / df[newton_indices]\n rts[newton_indices] -= dx[newton_indices]\n\n # Stop criteria\n if np.all(np.abs(dx) < precision):\n break\n\n # Else update parameters\n f = f_prime(rts)\n df = f_prime2(rts)\n f_negative = f < 0\n x_low[f_negative] = rts[f_negative]\n x_high[~f_negative] = rts[~f_negative]\n\n # Changing low and high of converged points\n converged = np.abs(dx) < precision\n x_low[converged] = rts[converged]\n x_high[converged] = rts[converged]\n\n print(np.sum(np.abs(dx) < precision))\n\n rot = rts\n m_list = np.arange(m + 1)\n m_list_ang = m_list * np.pi / 180\n c *= np.exp(1j * np.outer(m_list_ang, rot))\n corr = (np.real(c[0]) + 2 * np.sum(np.real(c[1:]), axis=0)) / 2\n\n return corr, rot", "def rotation(self, e1, e2, theta):\n e1_r = e1 * numpy.cos(2 * theta) - e2 * numpy.sin(2 * theta)\n e2_r = e1 * numpy.sin(2 * theta) + e2 * numpy.cos(2 * theta)\n return e1_r, e2_r", "def add_triangles(t1, t2):\n solutions = []\n for i in range(3):\n for j in range(3):\n # See if t1 angle 0 and t2 angle i can be merged\n if eq(t1.angles[i] + t2.angles[j], math.pi):\n # The two angles (t1[i] and t2[j]) fit together to form a straight\n # line. 
Now we just need to make sure that the sides that are\n # merging are the same length\n if eq(t1.sides[(i + 1) % 3], t2.sides[(j + 2) % 3]):\n # Calculate the dx and dy on the side of t1 that's being \"extended\"\n dx = t1.vertices[i][0] - t1.vertices[(i + 1) % 3][0]\n dy = t1.vertices[i][1] - t1.vertices[(i + 1) % 3][1]\n\n v3x = t1.vertices[i][0] + dx * t2.sides[(j + 1) % 3] / t1.sides[(i + 2) % 3]\n v3y = t1.vertices[i][1] + dy * t2.sides[(j + 1) % 3] / t1.sides[(i + 2) % 3]\n solutions.append(Triangle([t1.vertices[(i + 1) % 3],\n t1.vertices[(i + 2) % 3],\n (v3x, v3y)]))\n\n if eq(t1.sides[(i + 2) % 3], t2.sides[(j + 1) % 3]):\n # Calculate the dx and dy on the side of t1 that's being \"extended\"\n dx = t1.vertices[i][0] - t1.vertices[(i + 2) % 3][0]\n dy = t1.vertices[i][1] - t1.vertices[(i + 2) % 3][1]\n\n v3x = t1.vertices[i][0] + dx * t2.sides[(j + 2) % 3] / t1.sides[(i + 1) % 3]\n v3y = t1.vertices[i][1] + dy * t2.sides[(j + 2) % 3] / t1.sides[(i + 1) % 3]\n solutions.append(Triangle([t1.vertices[(i + 1) % 3],\n t1.vertices[(i + 2) % 3],\n (v3x, v3y)]))\n\n return solutions", "def get_angle(a: Keypoint, b: Keypoint, c: Keypoint) -> float:\n # get a vector with origin in (0,0) from points a and b by substracting Point a from Point b\n vector_a = keypoint_to_vector(a, b)\n vector_c = keypoint_to_vector(c, b)\n # https://de.wikipedia.org/wiki/Skalarprodukt => winkel phi = arccos(...)\n phi = np.arccos(np.dot(vector_a, vector_c) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_c)))\n angle_left_opening = np.cross(vector_a, vector_c) < 0\n return phi if angle_left_opening else -phi", "def test_rotation_isometry(self):\n import numpy\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n # 1/sqrt(2)\n s2_ref = 0.707106781186547524400844362104785\n\n o = s.make_origin(2)\n p = s.make_point((1, 0), magic)\n q = s.make_point((s2_ref, s2_ref), magic)\n\n rot = space_point_transform(\n numpy.array([[1,0,0],[0,s2_ref,-s2_ref],[0,s2_ref,s2_ref]]),\n curvature=k,\n math = common_math\n )\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (\n s.make_point((5/13, 12/13), magic),\n s.make_point((-3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n # 1/8 turn, times 8\n check_transform_eq(rot*8, i)\n\n # rotate, shift, rotate\n check_transform_eq(g, rot + f + rot * -1)\n\n # the other way\n check_transform_eq(f, rot * -1 + g + rot)", "def addVectors(r1, r2):\n \"\"\" [0] = angle, [1] = lenght \"\"\"\n x = (math.sin(r1[0]) * r1[1]) + (math.sin(r2[0]) * r2[1])\n y = (math.cos(r1[0]) * r1[1]) + (math.cos(r2[0]) * r2[1])\n \n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n\n return (angle, length)", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 
* np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def get_best_quaternion(coordlist1, coordlist2):\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = float(N14)\n N42 = float(N24)\n N43 = float(N34)\n\n N = np.matrix([[N11, N12, N13, N14],\n [N21, N22, N23, N24],\n [N31, N32, N33, N34],\n [N41, N42, N43, N44]])\n\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1, ).tolist()\n return quat, max(w)", "def orientation(sign1L, sign2L):\n p_p = 0\n m_m = 0\n p_m = 0\n m_p = 0\n for index in range(len(sign1L)):\n sign1 = sign1L[index]\n sign2 = sign2L[index]\n if sign1 in [\"+\", \"-\"] and sign2 in [\"+\", \"-\"]:\n if sign1 == sign2:\n if sign1 == \"+\":\n p_p += 1\n elif sign1 == \"-\":\n m_m += 1\n else:\n if sign1 == \"+\" and sign2 == \"-\":\n p_m += 1\n elif sign1 == \"-\" and sign2 == \"+\":\n m_p += 1\n same_strand = p_p + m_m\n opposite_strand = p_m + m_p\n convergent = p_m\n divergent = m_p\n return p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent", "def 
euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def matrix_discrepancy(centers1, rotations1, centers2, rotations2,\n angle_weight=None, center_weight=None):\n\n n = len(centers1)\n\n assert len(centers2) == n\n assert len(rotations1) == n\n assert len(rotations2) == n\n assert n >= 2\n\n if not angle_weight:\n angle_weight = 1.0\n\n if not center_weight:\n center_weight = [1.0] * n\n\n if n > 2:\n rotation_matrix, new1, mean1, RMSD, sse = \\\n besttransformation_weighted(centers1, centers2, center_weight)\n\n orientation_error = 0\n angles = []\n for r1, r2 in zip(rotations1, rotations2):\n if r1.shape[0] > 0 and r2.shape[0] > 0:\n angle = angle_of_rotation(np.dot(np.dot(rotation_matrix, r2),\n np.transpose(r1)))\n orientation_error += np.square(angle)\n discrepancy = np.sqrt(sse + angle_weight * orientation_error) / n\n\n else:\n\n R1 = np.dot(np.transpose(rotations1[1]),rotations1[0]) # rotation from nt 0 to nt1 of 1st motif\n R2 = np.dot(np.transpose(rotations2[0]),rotations2[1]) # rotation from nt 0 to nt1 of 2nd motif\n\n rot1 = np.dot(R1,R2)\n ang1 = angle_of_rotation(rot1)\n\n rot2 = np.dot(np.transpose(R1),np.transpose(R2))\n ang2 = angle_of_rotation(rot2)\n\n T1 = np.dot(centers1[1] - centers1[0],rotations1[0])\n T2 = np.dot(centers1[0] - centers1[1],rotations1[1])\n\n S1 = np.dot(centers2[1] - centers2[0],rotations2[0])\n S2 = np.dot(centers2[0] - centers2[1],rotations2[1])\n\n D1 = T1-S1\n D2 = T2-S2\n\n discrepancy = np.sqrt(D1[0]**2 + D1[1]**2 + D1[2]**2 + (angle_weight*ang1)**2)\n discrepancy += np.sqrt(D2[0]**2 + D2[1]**2 + D2[2]**2 + (angle_weight*ang2)**2)\n\n# factor = 1/(4*np.sqrt(2)) # factor to multiply by discrepancy; faster to precompute?\n\n discrepancy = discrepancy * 0.17677669529663687\n\n return discrepancy", "def get_phi_chi_omega(self, angles):\n (phi, chi, omega) = angles[0:3]\n return (phi, chi, omega)", "def rotacija_pravouglog_trougla_oko_hipotenuze(s2, s1):\r\n c = math.sqrt(s2 * s2 + s1 * s1)\r\n povrsina_trougla= (s2 * s1) / 2\r\n hc = (2 * povrsina_trougla) / c\r\n H1 = math.sqrt(s1 * s1 - hc * hc)\r\n H2 = math.sqrt(s2 * s2 - hc * hc)\r\n pi= 3.14\r\n povrsina = hc * pi * (s1 + s2)\r\n zapremina = (hc * hc * pi * (H1 + H2)) / 3\r\n return povrsina, zapremina", "def align_z_along_fixed_ends(xyz_file_parts, fixed_beginning, fixed_end):\n\n\t\tmolecule_axis = [xyz_file_parts[-1][1,fixed_end],xyz_file_parts[-1][2,fixed_end],xyz_file_parts[-1][3,fixed_end]]\n\n\n\t\tangle = np.arccos(molecule_axis[2]/np.linalg.norm(molecule_axis))\n\t\ttheta = angle\n\n\t\tif(angle != 0):\n\t\t\t#calculate rotation axis\n\t\t\trotation_axis = np.cross(molecule_axis, [0.0,0.0,1.0])\n\t\t\trotation_axis = 1.0/np.linalg.norm(rotation_axis)*rotation_axis\n\t\t\tu = rotation_axis\n\n\t\t\t#calculate rotation_matrix\n\t\t\trotation_matrix = [[np.cos(theta) + u[0]**2 * (1-np.cos(theta)), u[0] * u[1] * (1-np.cos(theta)) - u[2] * np.sin(theta), u[0] * u[2] * (1 - np.cos(theta)) + u[1] * np.sin(theta)],\n\t [u[0] * u[1] * (1-np.cos(theta)) + u[2] * np.sin(theta), np.cos(theta) + u[1]**2 * (1-np.cos(theta)), u[1] * u[2] * (1 - np.cos(theta)) - u[0] * np.sin(theta)],\n\t [u[0] * u[2] * (1-np.cos(theta)) - u[1] * np.sin(theta), u[1] * u[2] * 
(1-np.cos(theta)) + u[0] * np.sin(theta), np.cos(theta) + u[2]**2 * (1-np.cos(theta))]]\n\n\t\t\tfor j in range(0, len(xyz_file_parts)):\n\t\t\t\tfor i in range(0, len(xyz_file_parts[j][1,:])):\n\t\t\t\t\t \n\t\t\t\t\tvector_to_rotate = [round(float(xyz_file_parts[j][1,i]),5),round(float(xyz_file_parts[j][2,i]),5),round(float(xyz_file_parts[j][3,i]),5)]\n\t\t\t\t\trotated_vector = np.asmatrix(rotation_matrix)*np.asmatrix(vector_to_rotate).T\n\t\t\t\t\txyz_file_parts[j][1,i] = round(rotated_vector[0,0],5)\n\t\t\t\t\txyz_file_parts[j][2,i] = round(rotated_vector[1,0],5)\n\t\t\t\t\txyz_file_parts[j][3,i] = round(rotated_vector[2,0],5)\n\t\t\treturn xyz_file_parts\n\t\telse:\n\t\t\treturn xyz_file_parts", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def Misorien2FZ3(m1,m2,symtype='Cubic'):\n if symtype!='Cubic':\n print \"only calculate axis for cubic symmetry\"\n return\n m2=np.matrix(m2)\n dm=(m2.T).dot(m1)\n ops=GetSymRotMat(symtype)\n angle=6.3\n for op1 in ops:\n for op2 in ops:\n tmp=op2.dot(dm.dot(op1))\n cosangle=0.5*(tmp.trace()-1)\n 
cosangle=min(0.9999999,cosangle)\n cosangle=max(-0.9999999,cosangle)\n newangle=np.arccos(cosangle)\n if newangle<angle:\n w,W=np.linalg.eig(tmp)\n i=np.where(abs(np.real(w)-1)<1e-8)[0]\n direction=np.asarray(np.real(W[:,i[-1]])).squeeze()\n if abs(direction[0])>1e-8:\n sina=(tmp[2,1]-tmp[1,2])/2.0/direction[0]\n if sina<0:\n direction=-direction\n if direction[0]>direction[1] and direction[1]>direction[2] and direction[2]>0:\n angle=newangle\n axis=direction\n tmp=tmp.T\n w,W=np.linalg.eig(tmp)\n i=np.where(abs(np.real(w)-1)<1e-8)[0]\n direction=np.asarray(np.real(W[:,i[-1]])).squeeze()\n if abs(direction[0])>1e-8:\n sina=(tmp[2,1]-tmp[1,2])/2.0/direction[0]\n if sina<0:\n direction=-direction\n if direction[0]>direction[1] and direction[1]>direction[2] and direction[2]>0:\n angle=newangle\n axis=direction\n\n return axis,angle", "def CombineRotation(a, b):\n # Use matrix multiplication: c = b*a.\n # We put 'b' on the left and 'a' on the right because,\n # just like when you use a matrix M to rotate a vector V,\n # you put the M on the left in the product M*V.\n # We can think of this as 'b' rotating all the 3 column vectors in 'a'.\n\n return RotationMatrix([\n [\n b.rot[0][0]*a.rot[0][0] + b.rot[1][0]*a.rot[0][1] + b.rot[2][0]*a.rot[0][2],\n b.rot[0][1]*a.rot[0][0] + b.rot[1][1]*a.rot[0][1] + b.rot[2][1]*a.rot[0][2],\n b.rot[0][2]*a.rot[0][0] + b.rot[1][2]*a.rot[0][1] + b.rot[2][2]*a.rot[0][2]\n ],\n [\n b.rot[0][0]*a.rot[1][0] + b.rot[1][0]*a.rot[1][1] + b.rot[2][0]*a.rot[1][2],\n b.rot[0][1]*a.rot[1][0] + b.rot[1][1]*a.rot[1][1] + b.rot[2][1]*a.rot[1][2],\n b.rot[0][2]*a.rot[1][0] + b.rot[1][2]*a.rot[1][1] + b.rot[2][2]*a.rot[1][2]\n ],\n [\n b.rot[0][0]*a.rot[2][0] + b.rot[1][0]*a.rot[2][1] + b.rot[2][0]*a.rot[2][2],\n b.rot[0][1]*a.rot[2][0] + b.rot[1][1]*a.rot[2][1] + b.rot[2][1]*a.rot[2][2],\n b.rot[0][2]*a.rot[2][0] + b.rot[1][2]*a.rot[2][1] + b.rot[2][2]*a.rot[2][2]\n ]\n ])", "def rotate_points(points, a, b):\n if points.ndim == 1:\n points = points[None, :]\n\n a = normalize_vector(a)\n b = normalize_vector(b)\n k = normalize_vector(np.cross(a, b))\n theta = angle_between_vectors(a, b, normalize=False)\n\n points_rot = points * np.cos(theta) \\\n + np.cross(k, points) * np.sin(theta) \\\n + k * np.dot(k, points.T).reshape(-1, 1) * (1 - np.cos(theta))\n return points_rot", "def compute_sign(k1, k2):\n\n def ordering_sign(permu, weights):\n \"\"\"Returns the exponent of the Koszul sign of the given\n permutation acting on the elements of degrees given by the\n list of weights\n\n \"\"\"\n sign_exp = 0\n for idx, j in enumerate(permu):\n to_add = [weights[permu.index(i)] for\n i in permu[idx + 1:] if i < j]\n sign_exp += weights[idx] * sum(to_add)\n return sign_exp % 2\n\n def action_sign(ordered_k1, ordered_weights):\n \"\"\"Given a ordered tuple [1,..,1, 2,...,2, ..., r,...,r]\n and weights [w_1, w_2, ..., w_{r+d}] of the same length, gives\n the koszul sign obtained by inserting from the left a weight 1\n operator between equal consecutive elements.\n\n \"\"\"\n sign_exp = 0\n for idx, (i, j) in enumerate(pairwise(ordered_k1)):\n if i == j:\n sign_exp += sum(ordered_weights[:idx + 1])\n return sign_exp % 2\n\n sign_exp = 0\n weights = [e.dimension % 2 for e in k2]\n inv_ordering_permu = [pair[0] for pair in\n sorted(enumerate(k1), key=itemgetter(1))]\n ordering_permu = tuple(inv_ordering_permu.index(i)\n for i in range(len(inv_ordering_permu)))\n sign_exp += ordering_sign(ordering_permu, weights)\n ordered_k1 = list(sorted(k1))\n ordered_weights = [weights[i] for i in 
inv_ordering_permu]\n sign_exp += action_sign(ordered_k1, ordered_weights)\n return (-1) ** sign_exp", "def my_rodriguez_rotation(P, k, theta):\n\n P_rot = np.zeros((len(P), 3))\n\n for i in range(len(P)):\n P_rot[i] = P[i]*np.cos(theta) + np.cross(k, P[i])*np.sin(theta) + \\\n k*np.dot(k, P[i])*(1.0-np.cos(theta))\n\n return P_rot", "def get_RotationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment \n self.rotInPlane = len(TiltSeries_.Projections) * [0.]\n kk = 0\n for Proj in TiltSeries_.Projections:\n self.rotInPlane[kk] = Proj.rotInPlane\n kk = kk + 1\n return self.rotInPlane", "def rotation_2d(points, angles):\n rot_sin = np.sin(angles)\n rot_cos = np.cos(angles)\n rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])\n return np.einsum('aij,jka->aik', points, rot_mat_T)", "def cphase(h1, h2):\n\n for h in (h1, h2):\n h.assert_ket_space()\n\n field = h1.base_field\n\n d = h1.dim()\n if h2.dim() != d:\n raise HilbertError('spaces must be of the same dimension')\n\n ret = (h1*h2).O.array()\n for (j, a) in enumerate(h1.index_iter()):\n for (k, b) in enumerate(h2.index_iter()):\n ret[{ h1: a, h1.H: a, h2: b, h2.H: b }] = field.fractional_phase(j*k, d)\n return ret", "def ROT(hist1: np.ndarray, hist2: np.ndarray, *args):\n\n rot = ROTSolver(hist1, hist2)\n _, coupling = rot.solve(plot=False)\n\n return coupling", "def compare_quaternion_lists(new_quats, ref_quats, tol=0.05):\n nquats = len(ref_quats) # 3 for multiruby case\n\n # FIRST CHECK THAT NUMBER OF ORIENTATIONS MATCHES\n if len(new_quats) != nquats:\n raise RuntimeError(\n \"Incorrect number of orientations found; should be %d\" % nquats\n + \", currently found %d\" % len(new_quats)\n )\n\n # NEXT CHECK THE ACTUAL MISORIENTATIONS\n # !!! order may be different\n for i, nq in enumerate(new_quats):\n ang, mis = misorientation(nq.reshape(4, 1), ref_quats.T)\n if np.min(ang) > np.radians(tol):\n raise RuntimeError(\n \"Misorientation for test orientation %d \" % i\n + \"is greater than threshold\"\n )", "def _findrotationmatrix(ccdata1, ccdata2):\n natoms = ccdata1.natom\n J = np.zeros((3, 3), dtype=np.float)\n\n for i in range(natoms):\n J += np.outer(ccdata1.atomcoords[0][i], ccdata2.atomcoords[0][i])\n\n U, s, V = np.linalg.svd(J)\n\n R = np.transpose(np.dot(V, np.transpose(U)))\n\n return R", "def test_rotation(self, tol):\n theta = 0.98\n S = symplectic.rotation(theta)\n expected = np.block([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n np.allclose(S, expected, atol=tol, rtol=0)", "def rotation(X, Y, C, S) :\n Xrot = X*C + Y*S \n Yrot = Y*C - X*S \n return Xrot, Yrot", "def angle( nt1, nt2, nt3 ):\n if vector(nt1, nt2) == [0,0]:\n print(\"nt1\", nt1.seqpos, \" at \", nt1.x, nt1.y, \" is at the same position as nt2\", nt2.seqpos)\n if vector(nt2, nt3) == [0,0]:\n print(\"nt2\", nt2.seqpos, \" at \", nt2.x, nt2.y, \" is at the same position as nt3\", nt3.seqpos)\n #print(vector(nt1, nt2), vector(nt2, nt3))\n if vectors_close(vector(nt1, nt2), vector(nt2, nt3)):\n # These vectors are identical and that is messing with the ability to call two things parallel?\n return 180.0\n return 180.0 - math.degrees(math.acos(dot(vector(nt1, nt2), vector(nt2, nt3)) / (mod(vector(nt1, nt2)) * mod(vector(nt2, nt3)))))", "def ik3(xyz_array):\n # Eqn 1\n theta_1 = np.arctan2(xyz_array[1], xyz_array[0])\n # Eqn 2\n r1 = np.hypot(xyz_array[0], xyz_array[1])\n # Eqn 3\n r2 = xyz_array[2] - link_lengths[0]\n # Eqn 4\n phi2 = np.arctan2(r2, r1)\n # Eqn 5\n r3 = np.hypot(r1, r2)\n # Eqn 6\n num6 = 
np.power(link_lengths[2], 2) - \\\n np.power(link_lengths[1], 2) - np.power(r3, 2)\n den6 = -2 * link_lengths[1] * r3\n phi1 = np.arccos(num6 / den6)\n # Eqn 7\n # theta_2 = phi2 - phi1 # elbow down\n theta_2 = phi2 + phi1\n # Eqn 8\n num8 = np.power(r3, 2) - \\\n np.power(link_lengths[1], 2) - np.power(link_lengths[2], 2)\n den8 = -2 * link_lengths[1] * link_lengths[2]\n phi3 = np.arccos(num8 / den8)\n # Eqn 9\n # theta_3 = pi - phi3 # elbow down\n theta_3 = -(np.pi - phi3)\n # Output Joint Angles\n theta_1 = np.rad2deg(theta_1)\n theta_2 = np.rad2deg(theta_2)\n theta_3 = np.rad2deg(theta_3)\n joint_rotations = np.array([theta_1, theta_2, theta_3])\n return joint_rotations", "def couple_so3(L1,L2,orbital=False):\n imax=int(2*min(L1,L2))\n if orbital==True:\n if (L1+L2)%1==0:\n L3set=[int(L1+L2-i) for i in range(0,imax+1)]\n else:\n print \"error message\"\n L3set=[]\n else:\n L3set=[L1+L2-i for i in range(0,imax+1)] \n return L3set", "def Euler2Rotation(phi, theta, psi):\n # only call sin and cos once for each angle to speed up rendering\n c_phi = np.cos(phi)\n s_phi = np.sin(phi)\n c_theta = np.cos(theta)\n s_theta = np.sin(theta)\n c_psi = np.cos(psi)\n s_psi = np.sin(psi)\n\n R_roll = np.array([[1, 0, 0],\n [0, c_phi, s_phi],\n [0, -s_phi, c_phi]])\n R_pitch = np.array([[c_theta, 0, -s_theta],\n [0, 1, 0],\n [s_theta, 0, c_theta]])\n R_yaw = np.array([[c_psi, s_psi, 0],\n [-s_psi, c_psi, 0],\n [0, 0, 1]])\n\n R = R_roll @ R_pitch @ R_yaw # inertial to body (Equation 2.4 in book)\n return R.T # transpose to return body to inertial", "def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)", "def equations(t, y, args):\n x1 = y[0] # x1 = theta1, angle\n x2 = y[1] # x2 = theta2, angle\n p1 = y[2] # p1 = omega1, angular velocity\n p2 = y[3] # p2 = omega2, angular velocity\n l1, l2, m1, m2, g = args\n x1_eq = p1\n x2_eq = p2\n p1_eq = -((g*(2*m1+m2)*np.sin(x1)+m2*(g*np.sin(x1-2*x2)+2*(l2*p2**2+l1*p1 **\n 2*np.cos(x1-x2))*np.sin(x1-x2)))/(2*l1*(m1+m2-m2*(np.cos(x1-x2))**2)))\n p2_eq = ((l1*(m1+m2)*p1**2+g*(m1+m2)*np.cos(x1)+l2*m2*p2**2 *\n np.cos(x1-x2))*np.sin(x1-x2))/(l2*(m1+m2-m2*(np.cos(x1-x2))**2))\n return [x1_eq, x2_eq, p1_eq, p2_eq]", "def test_quaternion_hamilton():\n q_ij = pr.concatenate_quaternions(pr.q_i, pr.q_j)\n assert_array_equal(pr.q_k, q_ij)\n q_ijk = pr.concatenate_quaternions(q_ij, pr.q_k)\n assert_array_equal(-pr.q_id, q_ijk)", "def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = np.dot(h0_2, h2_3)\n return h0_3", "def set_rama_angles(moving_h, angles, direction_forward=True, check_omega=False):\n # print \"angles\", angles\n # STOP()\n result_h = moving_h.deep_copy()\n result_h.reset_atom_i_seqs()\n fixed_omega = False\n phi_psi_atoms = 
utils.get_phi_psi_atoms(moving_h, omega=True)\n assert len(phi_psi_atoms) == len(angles), \"%d != %d\" % (len(phi_psi_atoms), len(angles))\n if not direction_forward:\n phi_psi_atoms.reverse()\n angles.reverse()\n for ps_atoms, target_angle_pair in zip(phi_psi_atoms, angles):\n phi_psi_pair = ps_atoms[0]\n # print \"phi_psi_pair\", phi_psi_pair\n omega = ps_atoms[2]\n phi_psi_angles = utils.get_pair_angles(phi_psi_pair)\n # print \"ps_atoms, target_angle_pair\", phi_psi_angles, target_angle_pair\n # phi\n if target_angle_pair[0] is not None and phi_psi_angles[0] is not None:\n rotation_angle = -phi_psi_angles[0]+target_angle_pair[0]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][1],\n phi_psi_pair[0][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # psi\n if target_angle_pair[1] is not None and phi_psi_angles[1] is not None:\n rotation_angle = -phi_psi_angles[1]+target_angle_pair[1]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[1][1],\n phi_psi_pair[1][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # omega\n if omega is not None and abs(abs(omega)-180) > 10 and check_omega:\n rotation_angle= -omega+180\n # print \"Omega rotation:\", omega, rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][0],\n phi_psi_pair[0][1],\n angle=rotation_angle,\n direction_forward=direction_forward)\n fixed_omega = True\n # print utils.list_rama_outliers_h(result_h)\n # result_h.write_pdb_file(file_name=\"variant_%s.pdb\" % direction_forward)\n # STOP()\n return result_h, fixed_omega", "def testCalculateRotationDiff(self):\n # Test identity\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertEqual(result, 0.0)\n # Test arbitrary rotation\n rot1 = numpy.array(\n [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n rot2 = numpy.array(\n [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])\n transform1[0:3, 0:3] = numpy.matmul(transform1[0:3, 0:3], rot1)\n transform2[0:3, 0:3] = numpy.matmul(transform2[0:3, 0:3], rot2)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Order shouldn't matter\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Test when the angle is pi\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n transform2[0, 0] = -1.0\n transform2[1, 1] = -1.0\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n # It might wrap to -pi, so check the absolute value\n self.assertAlmostEqual(abs(result), numpy.pi, 8)\n # Test an extreme value\n transform2 = -1.0 * numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(abs(result), numpy.pi)", "def compute_egocentric_delta(p1, r1, p2, r2):\n x1, y1, z1 = p1\n x2, y2, z2 = p2\n theta_1 = compute_heading_from_quaternion(r1)\n theta_2 = compute_heading_from_quaternion(r2)\n\n D_rho = math.sqrt((x1 - x2) ** 2 + (z1 - z2) ** 2)\n D_phi = (\n math.atan2(x2 - x1, -z2 + z1) - theta_1\n ) # counter-clockwise rotation about Y from -Z to X\n D_theta = theta_2 - theta_1\n\n return (D_rho, D_phi, 
D_theta)", "def matrix_discrepancy_cutoff(centers1, rotations1, centers2, rotations2, cutoff,\n angle_weight=None, center_weight=None):\n\n n = len(centers1)\n\n assert len(centers2) == n\n assert len(rotations1) == n\n assert len(rotations2) == n\n assert n >= 2\n\n if not angle_weight:\n angle_weight = [1] * n\n\n if not center_weight:\n center_weight = [1] * n\n\n if n > 2:\n # solve np.sqrt(sse + orientation_error) / n > cutoff to give sse + orientation_error > (n*cutoff)**2\n temp_cutoff = (n * cutoff)**2\n\n rotation_matrix, new1, mean1, RMSD, sse = \\\n besttransformation_weighted(centers1, centers2, center_weight)\n\n total_error = sse\n if total_error > temp_cutoff:\n return None\n\n angles = []\n for r1, r2 in zip(rotations1, rotations2):\n if r1.shape[0] > 0 and r2.shape[0] > 0:\n angle = angle_of_rotation(np.dot(np.dot(rotation_matrix, r2),\n np.transpose(r1)))\n total_error += np.square(angle)\n if total_error > temp_cutoff:\n return None\n\n discrepancy = np.sqrt(total_error) / n\n\n else:\n\n R1 = np.dot(np.transpose(rotations1[1]),rotations1[0]) # rotation from nt 0 to nt1 of 1st motif\n R2 = np.dot(np.transpose(rotations2[0]),rotations2[1]) # rotation from nt 0 to nt1 of 2nd motif\n\n rot1 = np.dot(R1,R2) #\n ang1 = angle_of_rotation(rot1)\n\n rot2 = np.dot(np.transpose(R1),np.transpose(R2))\n ang2 = angle_of_rotation(rot2)\n\n T1 = np.dot(centers1[1] - centers1[0],rotations1[0])\n T2 = np.dot(centers1[0] - centers1[1],rotations1[1])\n\n S1 = np.dot(centers2[1] - centers2[0],rotations2[0])\n S2 = np.dot(centers2[0] - centers2[1],rotations2[1])\n\n D1 = T1-S1\n D2 = T2-S2\n\n discrepancy = np.sqrt(D1[0]**2 + D1[1]**2 + D1[2]**2 + (angle_weight[0]*ang1)**2)\n discrepancy += np.sqrt(D2[0]**2 + D2[1]**2 + D2[2]**2 + (angle_weight[0]*ang2)**2)\n\n# factor = 1/(4*np.sqrt(2)) # factor to multiply by discrepancy; faster to precompute?\n\n discrepancy = discrepancy * 0.17677669529663687\n\n return discrepancy", "def fk3(joint_rotations):\n h0_3 = htm0_3(joint_rotations)\n x0_3 = h0_3[0, 3]\n y0_3 = h0_3[1, 3]\n z0_3 = h0_3[2, 3]\n d0_3 = [x0_3, y0_3, z0_3]\n return d0_3", "def get_admix_angles (self, t):\n \n Omega, dOmega_dt = self.get_Omega(t)\n Delta, dDelta_dt = self.get_Delta(t)\n\n theta_admix_1 = arctan2(Omega, Delta)\n theta_admix_2 = arctan2(sqrt(2) * Omega, Delta)\n \n return theta_admix_1, theta_admix_2", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def determine_rotation(arm, d, tip_data, rot_data):\n n_t = np.zeros(3)\n for this_n_t in tip_data['pos_ntip_wrt_r']:\n n_t += this_n_t\n n_t /= len(tip_data['pos_ntip_wrt_r'])\n print(\"Our n_t to use in this stage: {}\".format(n_t))\n\n K = len(rot_data['pos_ntip_wrt_s'])\n errors_zyz = []\n errors_zyx = []\n\n for k in range(K):\n lhs = rot_data['pos_ntip_wrt_s'][k]\n t_st = rot_data['pos_tool_wrt_s_code'][k]\n ypr = rot_data['rot_tool_wrt_s_code'][k]\n yaw, pitch, roll = ypr[0], ypr[1], ypr[2]\n\n # R_zyz\n R_z1 = U.rotation_matrix_3x3_axis(angle=roll, axis='z')\n R_y = U.rotation_matrix_3x3_axis(angle=pitch, axis='y')\n R_z2 = 
U.rotation_matrix_3x3_axis(angle=yaw, axis='z')\n R_zyz = R_z2.dot(R_y).dot(R_z1)\n\n # R_zyx\n R_x = U.rotation_matrix_3x3_axis(angle=roll, axis='x')\n R_y = U.rotation_matrix_3x3_axis(angle=pitch, axis='y')\n R_z = U.rotation_matrix_3x3_axis(angle=yaw, axis='z')\n R_zyx = R_z.dot(R_y).dot(R_x)\n\n # Evaluate!\n rhs_zyz = t_st + R_zyz.dot( n_t )\n rhs_zyx = t_st + R_zyx.dot( n_t )\n err_zyz = np.linalg.norm(lhs - rhs_zyz)\n err_zyx = np.linalg.norm(lhs - rhs_zyx)\n errors_zyz.append( err_zyz )\n errors_zyx.append( err_zyx )\n print(\"\\nerr_zyz: {:.3f} for {}-th sample\".format(err_zyz, k))\n print(\"err_zyx: {:.3f} for {}-th sample\".format(err_zyx, k))\n print(\"R_zyz:\\n{}\".format(R_zyz))\n print(\"R_zyx:\\n{}\".format(R_zyx))\n\n print(\"\\nDone with evaluation!\")\n print(\"zyz has avg error {:.5f}\".format(np.mean(errors_zyz)))\n print(\"zyx has avg error {:.5f}\".format(np.mean(errors_zyx)))", "def test_quaternion_diff():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n q1 = pr.random_quaternion(random_state)\n q2 = pr.random_quaternion(random_state)\n a_diff = pr.quaternion_diff(q1, q2) # q1 - q2\n q_diff = pr.quaternion_from_axis_angle(a_diff)\n q3 = pr.concatenate_quaternions(q_diff, q2) # q1 - q2 + q2\n pr.assert_quaternion_equal(q1, q3)", "def getOblateXRotMatrix(aStar1, aStar2):\n aStarDir = aStar2 - a1\n aStarmid = aStar1 + 0.5 * aStarDir\n kath = np.sqrt((aStarDir[0] * aStarDir[0] + aStarDir[1] * aStarDir[1]) / 4.0)\n phi = np.arctan( abs( (aStarDir[2]/2) / kath) )\n octantAStar2 = octant(aStar2)\n if octantAStar2 in [1, 2, 7, 8]: #\n phi = -phi\n print \"phi =\" , np.rad2deg(phi)\n RotX = np.matrix( [ [ 1.0, 0.0 , 0.0 ],\n [ 0.0, np.cos(phi), np.sin(phi)],\n [ 0.0, -np.sin(phi), np.cos(phi)]\n ])\n return np.asarray( RotX )", "def differential_rotation(lat, A, B, C):\n \n lat_deg = lat * np.pi/180.\n return A + B * np.sin(lat_deg)**2 + C * np.sin(lat_deg)**4", "def _rotations_guard_clauses(R1: Union[list, np.ndarray], R2: Union[list, np.ndarray]) -> None:\n for label, rotation_matrix in zip(['R1', 'R2'], [R1, R2]):\n if not isinstance(rotation_matrix, (list, np.ndarray)):\n raise TypeError(f\"{label} must be an array. Got {type(rotation_matrix)}\")\n r1, r2 = np.copy(R1), np.copy(R2)\n for rotation_matrix in [r1, r2]:\n if rotation_matrix.shape[-2:] != (3, 3):\n raise ValueError(f\"Rotation matrices must be of shape (N, 3, 3) or (3, 3). 
Got {rotation_matrix.shape}.\")\n r1_shape, r2_shape = r1.shape, r2.shape\n if r1_shape != r2_shape:\n raise ValueError(f\"Cannot compare R1 of shape {r1_shape} and R2 of shape {r2_shape}.\")", "def next_rotation(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(q_1, q_2)\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized", "def test_from_two_vectors(self):\r\n for _ in range(20):\r\n v0 = np.random.randn(3)\r\n v1 = np.random.randn(3)\r\n v0 /= np.linalg.norm(v0)\r\n v1 /= np.linalg.norm(v1)\r\n\r\n q = from_two_vectors(v0, v1)\r\n R = to_rotation(q)\r\n\r\n zero_vec = R @ v0 - v1\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n q_inv = from_two_vectors(v1, v0)\r\n R_inv = to_rotation(q_inv)\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def get_phi_chi_omega(self, angles):\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n return (phi, chi, omega)", "def euler2dcm(angles, rot_seq='zyx'):\n dcm = np.zeros((3, 3))\n cangle = np.cos(angles)\n sangle = np.sin(angles)\n rot_seq = rot_seq.lower()\n if rot_seq == 'zyx':\n dcm[0, 0] = cangle[1]*cangle[0]\n dcm[0, 1] = cangle[1]*sangle[0]\n dcm[0, 2] = -sangle[1]\n dcm[1, 0] = sangle[2]*sangle[1]*cangle[0] - cangle[2]*sangle[0]\n dcm[1, 1] = sangle[2]*sangle[1]*sangle[0] + cangle[2]*cangle[0]\n dcm[1, 2] = cangle[1]*sangle[2]\n dcm[2, 0] = sangle[1]*cangle[2]*cangle[0] + sangle[0]*sangle[2]\n dcm[2, 1] = sangle[1]*cangle[2]*sangle[0] - cangle[0]*sangle[2]\n dcm[2, 2] = cangle[1]*cangle[2]\n return dcm\n elif rot_seq == 'zyz':\n dcm[0, 0] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n dcm[0, 1] = sangle[0]*cangle[2]*cangle[1] + cangle[0]*sangle[2]\n dcm[0, 2] = -sangle[1]*cangle[2]\n dcm[1, 0] = -cangle[0]*cangle[1]*sangle[2] - sangle[0]*cangle[2]\n dcm[1, 1] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[1, 2] = sangle[1]*sangle[2]\n dcm[2, 0] = cangle[0]*sangle[1]\n dcm[2, 1] = sangle[0]*sangle[1]\n dcm[2, 2] = cangle[1]\n return dcm\n elif rot_seq == 'zxy':\n dcm[0, 0] = cangle[2]*cangle[0] - sangle[1]*sangle[2]*sangle[0]\n dcm[0, 1] = cangle[2]*sangle[0] + sangle[1]*sangle[2]*cangle[0]\n dcm[0, 2] = -sangle[2]*cangle[1]\n dcm[1, 0] = -cangle[1]*sangle[0]\n dcm[1, 1] = cangle[1]*cangle[0]\n dcm[1, 2] = sangle[1]\n dcm[2, 0] = sangle[2]*cangle[0] + sangle[1]*cangle[2]*sangle[0]\n dcm[2, 1] = sangle[2]*sangle[0] - sangle[1]*cangle[2]*cangle[0]\n dcm[2, 2] = cangle[1]*cangle[2]\n return dcm\n elif rot_seq == 'zxz':\n dcm[0, 0] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[0, 1] = cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[2]\n dcm[0, 2] = sangle[1]*sangle[2]\n dcm[1, 0] = -sangle[0]*cangle[2]*cangle[1] - cangle[0]*sangle[2]\n dcm[1, 1] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n dcm[1, 2] = sangle[1]*cangle[2]\n dcm[2, 0] = sangle[0]*sangle[1]\n dcm[2, 1] = -cangle[0]*sangle[1]\n dcm[2, 2] = cangle[1]\n return dcm\n elif rot_seq == 'yxz':\n dcm[0, 0] = cangle[0]*cangle[2] + sangle[1]*sangle[0]*sangle[2]\n dcm[0, 1] = 
cangle[1]*sangle[2]\n dcm[0, 2] = -sangle[0]*cangle[2] + sangle[1]*cangle[0]*sangle[2]\n dcm[1, 0] = -cangle[0]*sangle[2] + sangle[1]*sangle[0]*cangle[2]\n dcm[1, 1] = cangle[1]*cangle[2]\n dcm[1, 2] = sangle[0]*sangle[2] + sangle[1]*cangle[0]*cangle[2]\n dcm[2, 0] = sangle[0]*cangle[1]\n dcm[2, 1] = -sangle[1]\n dcm[2, 2] = cangle[1]*cangle[0]\n return dcm\n elif rot_seq == 'yxy':\n dcm[0, 0] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[0, 1] = sangle[1]*sangle[2]\n dcm[0, 2] = -cangle[0]*cangle[1]*sangle[2] - sangle[0]*cangle[2]\n dcm[1, 0] = sangle[0]*sangle[1]\n dcm[1, 1] = cangle[1]\n dcm[1, 2] = cangle[0]*sangle[1]\n dcm[2, 0] = sangle[0]*cangle[2]*cangle[1] + cangle[0]*sangle[2]\n dcm[2, 1] = -sangle[1]*cangle[2]\n dcm[2, 2] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n return dcm\n elif rot_seq == 'yzx':\n dcm[0, 0] = cangle[0]*cangle[1]\n dcm[0, 1] = sangle[1]\n dcm[0, 2] = -sangle[0]*cangle[1]\n dcm[1, 0] = -cangle[2]*cangle[0]*sangle[1] + sangle[2]*sangle[0]\n dcm[1, 1] = cangle[1]*cangle[2]\n dcm[1, 2] = cangle[2]*sangle[0]*sangle[1] + sangle[2]*cangle[0]\n dcm[2, 0] = sangle[2]*cangle[0]*sangle[1] + cangle[2]*sangle[0]\n dcm[2, 1] = -sangle[2]*cangle[1]\n dcm[2, 2] = -sangle[2]*sangle[0]*sangle[1] + cangle[2]*cangle[0]\n return dcm\n elif rot_seq == 'yzy':\n dcm[0, 0] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n dcm[0, 1] = sangle[1]*cangle[2]\n dcm[0, 2] = -sangle[0]*cangle[2]*cangle[1] - cangle[0]*sangle[2]\n dcm[1, 0] = -cangle[0]*sangle[1]\n dcm[1, 1] = cangle[1]\n dcm[1, 2] = sangle[0]*sangle[1]\n dcm[2, 0] = cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[2]\n dcm[2, 1] = sangle[1]*sangle[2]\n dcm[2, 2] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n return dcm\n elif rot_seq == 'xyz':\n dcm[0, 0] = cangle[1]*cangle[2]\n dcm[0, 1] = sangle[0]*sangle[1]*cangle[2] + cangle[0]*sangle[2]\n dcm[0, 2] = -cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[2]\n dcm[1, 0] = -cangle[1]*sangle[2]\n dcm[1, 1] = -sangle[0]*sangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[1, 2] = cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[2]\n dcm[2, 0] = sangle[1]\n dcm[2, 1] = -sangle[0]*cangle[1]\n dcm[2, 2] = cangle[0]*cangle[1]\n return dcm\n elif rot_seq == 'xyx':\n dcm[0, 0] = cangle[1]\n dcm[0, 1] = sangle[0]*sangle[1]\n dcm[0, 2] = -cangle[0]*sangle[1]\n dcm[1, 0] = sangle[1]*sangle[2]\n dcm[1, 1] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[1, 2] = cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[2]\n dcm[2, 0] = sangle[1]*cangle[2]\n dcm[2, 1] = -sangle[0]*cangle[2]*cangle[1] - cangle[0]*sangle[2]\n dcm[2, 2] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n return dcm\n elif rot_seq == 'xzy':\n dcm[0, 0] = cangle[2]*cangle[1]\n dcm[0, 1] = cangle[0]*cangle[2]*sangle[1] + sangle[0]*sangle[2]\n dcm[0, 2] = sangle[0]*cangle[2]*sangle[1] - cangle[0]*sangle[2]\n dcm[1, 0] = -sangle[1]\n dcm[1, 1] = cangle[0]*cangle[1]\n dcm[1, 2] = sangle[0]*cangle[1]\n dcm[2, 0] = sangle[2]*cangle[1]\n dcm[2, 1] = cangle[0]*sangle[1]*sangle[2] - sangle[0]*cangle[2]\n dcm[2, 2] = sangle[0]*sangle[1]*sangle[2] + cangle[0]*cangle[2]\n return dcm\n elif rot_seq == 'xzx':\n dcm[0, 0] = cangle[1]\n dcm[0, 1] = cangle[0]*sangle[1]\n dcm[0, 2] = sangle[0]*sangle[1]\n dcm[1, 0] = -sangle[1]*cangle[2]\n dcm[1, 1] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n dcm[1, 2] = sangle[0]*cangle[2]*cangle[1] + cangle[0]*sangle[2]\n dcm[2, 0] = sangle[1]*sangle[2]\n dcm[2, 1] = -cangle[0]*cangle[1]*sangle[2] - sangle[0]*cangle[2]\n dcm[2, 2] = 
-sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n return dcm\n else:\n return False", "def find_rotation_and_seed_unique(q1, q2, closed=0, lam=0.0, rotation=True, method=\"DP\"):\n\n n, T = q1.shape\n\n scl = 4.\n minE = 1000\n if closed == 1:\n end_idx = int(floor(T/scl))\n scl = 4\n else:\n end_idx = 0\n \n for ctr in range(0, end_idx+1):\n if closed == 1:\n q2n = shift_f(q2, scl*ctr)\n else:\n q2n = q2.copy()\n \n if rotation:\n q2new, R = find_best_rotation(q1, q2n)\n else:\n q2new = q2n\n R = eye(n)\n\n # Reparam\n if norm(q1-q2new,'fro') > 0.0001:\n gam = optimum_reparam_curve(q2new, q1, lam, method)\n gamI = uf.invertGamma(gam)\n p2n = q_to_curve(q2n)\n p2n = group_action_by_gamma_coord(p2n,gamI)\n q2new = curve_to_q(p2n)[0]\n if closed == 1:\n q2new = project_curve(q2new)\n else:\n gamI = linspace(0,1,T)\n \n tmp = innerprod_q2(q1,q2new)\n if tmp > 1:\n tmp = 1\n if tmp < -1:\n tmp = -1\n Ec = arccos(tmp)\n if Ec < minE:\n Rbest = R\n q2best = q2new\n gamIbest = gamI\n minE = Ec\n\n return (q2best, Rbest, gamIbest)", "def compute_RotMats(a, e, t):\n assert len(a)==len(e)==len(t)\n M = len(a)\n\n # camera intrinsic matrix\n Rz = np.zeros((M, 3, 3), dtype=np.float32)\n Rx = np.zeros((M, 3, 3), dtype=np.float32)\n Rz2 = np.zeros((M, 3, 3), dtype=np.float32)\n # C = np.zeros((M, 1, 3), dtype=np.float32)\n # initial \"1\" positions.\n Rz [:, 2, 2] = 1\n Rx [:, 0, 0] = 1\n Rz2[:, 2, 2] = 1\n #\n R = np.zeros((M, 3, 3), dtype=np.float32)\n\n # convert to radius\n a = a * pi / 180.\n e = e * pi / 180.\n t = t * pi / 180.\n\n # update a, e, t\n a = -a\n e = pi/2.+e\n t = -t\n #\n sin_a, cos_a = np.sin(a), np.cos(a)\n sin_e, cos_e = np.sin(e), np.cos(e)\n sin_t, cos_t = np.sin(t), np.cos(t)\n\n # ===========================\n # rotation matrix\n # ===========================\n \"\"\"\n # [Transposed]\n Rz = np.matrix( [[ cos(a), sin(a), 0 ], # model rotate by a\n [ -sin(a), cos(a), 0 ],\n [ 0, 0, 1 ]] )\n # [Transposed]\n Rx = np.matrix( [[ 1, 0, 0 ], # model rotate by e\n [ 0, cos(e), sin(e) ],\n [ 0, -sin(e), cos(e) ]] )\n # [Transposed]\n Rz2= np.matrix( [[ cos(t), sin(t), 0 ], # camera rotate by t (in-plane rotation)\n [-sin(t), cos(t), 0 ],\n [ 0, 0, 1 ]] )\n R = Rz2*Rx*Rz\n \"\"\"\n\n # Original matrix (None-transposed.)\n # No need to set back to zero?\n Rz[:, 0, 0], Rz[:, 0, 1] = cos_a, -sin_a\n Rz[:, 1, 0], Rz[:, 1, 1] = sin_a, cos_a\n #\n Rx[:, 1, 1], Rx[:, 1, 2] = cos_e, -sin_e\n Rx[:, 2, 1], Rx[:, 2, 2] = sin_e, cos_e\n #\n Rz2[:, 0, 0], Rz2[:, 0, 1] = cos_t, -sin_t\n Rz2[:, 1, 0], Rz2[:, 1, 1] = sin_t, cos_t\n # R = Rz2*Rx*Rz\n R[:] = np.einsum(\"nij,njk,nkl->nil\", Rz2, Rx, Rz)\n\n # Return the original matrix without transpose!\n return R", "def addVectors((angle1, length1), (angle2, length2)):\n x = math.sin(angle1) * length1 + math.sin(angle2) * length2\n y = math.cos(angle1) * length1 + math.cos(angle2) * length2\n length = math.hypot(x,y)\n angle = 0.5 * math.pi - math.atan2(y,x)\n return (angle, length)", "def find_inplane_to_match(phiA,thetaA,phiB,thetaB,psiA=0,psiB=0):\n\t#from math import pi, sqrt, cos, acos, sin\n\n\tRA = Transform({'type': 'spider', 'phi': phiA, 'theta': thetaA, 'psi': psiA})\n\tRB = Transform({'type': 'spider', 'phi': phiB, 'theta': thetaB, 'psi': psiB})\n\tRBT = RB.transpose()\n\tRABT = RA * RBT\n\n\tRABTeuler = RABT.get_rotation('spider')\n\tRABTphi = RABTeuler['phi']\n\tRABTtheta = RABTeuler['theta']\n\tRABTpsi = RABTeuler['psi']\n\n\t#deg_to_rad = pi/180.0\n\t#thetaAR = thetaA*deg_to_rad\n\t#thetaBR = thetaB*deg_to_rad\n\t#phiAR = 
phiA*deg_to_rad\n\t#phiBR = phiB *deg_to_rad\n\n\t#d12=cos(thetaAR)*cos(thetaBR) + sin(thetaAR)*sin(thetaBR)*cos(phiAR-phiBR)\n\treturn (-RABTpsi-RABTphi),RABTtheta # 180.0*acos(d12)/pi;", "def compute_error_minimizing_rotation(Points1, Points2):\r\n #TODO: implement me\r\n\r\n H_1_1 = 0\r\n H_1_2 = 0\r\n H_2_1 = 0\r\n H_2_2 = 0\r\n\r\n for t in range(1, len(Points1)):\r\n H_1_1 = H_1_1 + (Points1[t][0] * Points2[t][0])\r\n H_1_2 = H_1_2 + (Points1[t][1] * Points2[t][0])\r\n H_2_1 = H_2_1 + (Points1[t][0] * Points2[t][1])\r\n H_2_2 = H_2_2 + (Points1[t][1] * Points2[t][1])\r\n\r\n H = [[H_1_1,H_1_2],[H_2_1,H_2_2]]\r\n\r\n U, S, V = numpy.linalg.svd(H)\r\n\r\n V = numpy.transpose(V)\r\n\r\n R_1_1 = (U[0][0] * V[0][0]) +((U[0][1] * V[1][0]))\r\n R_1_2 = (U[0][0] * V[0][1]) +((U[0][1] * V[1][1]))\r\n R_2_1 = (U[1][0] * V[0][0]) +((U[1][1] * V[1][0]))\r\n R_2_2 = (U[1][0] * V[0][1]) +((U[1][1] * V[1][1]))\r\n\r\n R = [[R_1_1,R_1_2],[R_2_1,R_2_2]]\r\n\r\n return R", "def rotationDetermination(self):\n \n for index, row in enumerate(self.magdata):\n if index > 11 and index < (len(self.magdata) - 12):\n br1 = [row[0] for row in self.magdata[(index-12):(index-2)]]\n bt1 = [row[1] for row in self.magdata[(index-12):(index-2)]]\n bn1 = [row[2] for row in self.magdata[(index-12):(index-2)]]\n b1 = np.matrix((np.mean(br1), np.mean(bt1), np.mean(bn1)))\n\n br2 = [row[0] for row in self.magdata[(index+2):(index+12)]]\n bt2 = [row[1] for row in self.magdata[(index+2):(index+12)]]\n bn2 = [row[2] for row in self.magdata[(index+2):(index+12)]]\n b2 = np.matrix((np.mean(br2), np.mean(bt2), np.mean(bn2)))\n\n theta = np.arccos(np.dot(b1,b2.T)/(np.linalg.norm(b1)*np.linalg.norm(b2)))*180/np.pi\n\n self.detections.rotations.append(theta[0,0])\n self.detections.rotationTimeTags.append(self.timestamps[index])\n \n\n## self.b1 = b1\n## self.b2 = b2\n self.detections.rotationBoundary=[]\n if len(self.detections.rotations) != 0:\n \n for index, theta in enumerate(self.detections.rotations):\n if index > 0:\n if theta > 30 and self.detections.rotations[index-1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])\n if index < len(self.detections.rotations)-1:\n if theta > 30 and self.detections.rotations[index+1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])", "def calculate_theta_vals(self) -> None:\n A = np.zeros(self.num_points) # Inappropriate names, but they mirror Knuth's notation.\n B = np.zeros(self.num_points)\n C = np.zeros(self.num_points)\n D = np.zeros(self.num_points)\n R = np.zeros(self.num_points)\n\n # Calculate the entries of the five vectors.\n # Skip first and last point if path is non-cyclic.\n point_ind = range(self.num_points) if self.is_cyclic else range(1, self.num_points - 1)\n for i in point_ind:\n z_h = self.points[i - 1]\n z_i = self.points[i]\n z_j = self.points[(i + 1) % self.num_points]\n\n A[i] = z_h.alpha / (z_i.beta ** 2 * z_h.d_val)\n B[i] = (3 - z_h.alpha) / (z_i.beta ** 2 * z_h.d_val)\n C[i] = (3 - z_j.beta) / (z_i.alpha ** 2 * z_i.d_val)\n D[i] = z_j.beta / (z_i.alpha ** 2 * z_i.d_val)\n R[i] = -B[i] * z_i.psi - D[i] * z_j.psi\n\n # Set up matrix M such that the soln. 
Mx = R are the theta values.\n M = np.zeros((self.num_points, self.num_points))\n for i in range(self.num_points):\n # Fill i-th row of M\n M[i][i - 1] = A[i]\n M[i][i] = B[i] + C[i]\n M[i][(i + 1) % self.num_points] = D[i]\n\n # Special formulas for first and last rows of M with non-cyclic paths.\n if not self.is_cyclic:\n # First row of M\n alpha_0 = self.points[0].alpha\n beta_1 = self.points[1].beta\n xi_0 = (alpha_0 ** 2 * self.begin_curl) / beta_1 ** 2\n M[0][0] = alpha_0 * xi_0 + 3 - beta_1\n M[0][1] = (3 - alpha_0) * xi_0 + beta_1\n R[0] = -((3 - alpha_0) * xi_0 + beta_1) * self.points[1].psi\n # Last row of M\n alpha_n_1 = self.points[-2].alpha\n beta_n = self.points[-1].beta\n xi_n = (beta_n ** 2 * self.end_curl) / alpha_n_1 ** 2\n M[-1][-2] = (3 - beta_n) * xi_n + alpha_n_1\n M[-1][-1] = (beta_n * xi_n + 3 - alpha_n_1)\n R[-1] = 0\n\n # Solve for theta values.\n thetas = np.linalg.solve(M, R)\n for i, point in enumerate(self.points):\n point.theta = thetas[i]", "def final_homography(pts1, pts2, feats1, feats2):\n\n #\n # Your code here\n #\n\n idxs1, idxs2 = find_matches(feats1, feats2)\n ransac_return = ransac(pts1[idxs1], pts2[idxs2])\n\n return ransac_return, idxs1, idxs2", "def get_phi_chi_omega(self, angles):\n (phi) = angles[0]\n chi = np.deg2rad(self.chi)\n omega = np.deg2rad(self.omega)\n return (phi, chi, omega)", "def ab2_timestep_rotation(sphere_positions, sphere_rotations, new_sphere_positions, new_sphere_rotations, Oa_out, Oa_out_previous, timestep):\r\n combined_Oa_for_ab2 = 1.5 * Oa_out - 0.5 * Oa_out_previous\r\n return euler_timestep_rotation(sphere_positions, sphere_rotations, new_sphere_positions, new_sphere_rotations, combined_Oa_for_ab2, timestep)", "def get_z(theta, phi):\n return math.cos(phi)/math.tan(theta/2) + 1j*math.sin(phi)/math.tan(theta/2)", "def find_best_rotation(q1, q2, allow_reflection = False, only_xy = False):\n if q1.ndim != 2 or q2.ndim != 2:\n raise Exception(\"This only supports curves of shape (N,M) for N dimensions and M samples\")\n\n n = q1.shape[0]\n\n # if only_xy, strip everything but the x and y coordinates of q1 and q2\n if only_xy:\n _q1 = q1[0:2, :]\n _q2 = q2[0:2, :]\n else:\n _q1 = q1\n _q2 = q2\n\n _n = _q1.shape[0]\n A = _q1@_q2.T\n U, s, Vh = svd(A)\n S = eye(_n)\n\n # if reflections are not allowed and the determinant of A is negative,\n # then the entry corresponding to the smallest singular value is negated\n # as in the Kabsch algorithm\n if det(A) < 0 and not allow_reflection:\n S[-1, -1] = -1 # the last entry of the matrix becomes -1\n\n _R = U@S@Vh # optimal\n \n # if only_xy, the top left block of the matrix is _R and the rest is identity matrix\n if only_xy:\n R = eye(n)\n R[0:2, 0:2] = _R\n else:\n R = _R\n \n q2new = R@q2\n\n return (q2new, R)", "def get_phi_chi_omega(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return (phi, chi, omega)", "def get_phi_chi_omega(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return (phi, chi, omega)", "def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = \"Minus\", symmetry='c1'):\n\n\tfrom math import pi, sqrt, cos, acos, tan, sin\n\tfrom utilities import even_angles_cd\n\tfrom string import lower,split\n\tangles = []\n\tsymmetryLower = symmetry.lower()\n\tsymmetry_string = split(symmetry)[0]\n\tif (symmetry_string[0] == \"c\"):\n\t\tif(phi2 == 359.99):\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, 
phiEqpsi)\n\t\t\tif(int(symmetry_string[1:]) > 1):\n\t\t\t\tif( int(symmetry_string[1:])%2 ==0):\n\t\t\t\t\tqt = 360.0/int(symmetry_string[1:])\n\t\t\t\telse:\n\t\t\t\t\tqt = 180.0/int(symmetry_string[1:])\n\t\t\t\tn = len(angles)\n\t\t\t\tfor i in xrange(n):\n\t\t\t\t\tt = n-i-1\n\t\t\t\t\tif(angles[t][1] == 90.0):\n\t\t\t\t\t\tif(angles[t][0] >= qt): del angles[t]\n\t\telse:\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)\n\telif(symmetry_string[0] == \"d\"):\n\t\tif(phi2 == 359.99):\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi)\n\t\t\tif (int(symmetry_string[1:])%2 == 0):\n\t\t\t\tqt = 360.0/2/int(symmetry_string[1:])\n\t\t\telse:\n\t\t\t\tqt = 180.0/2/int(symmetry_string[1:])\n\t\t\tn = len(angles)\n\t\t\tfor i in xrange(n):\n\t\t\t\tt = n-i-1\n\t\t\t\tif(angles[t][1] == 90.0):\n\t\t\t\t\tif(angles[t][0] >= qt): del angles[t]\n\t\telse:\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)\n\telif(symmetry_string[0] == \"s\"):\n\t\n\t#if symetry is \"s\", deltphi=delta, theata intial=theta1, theta end=90, delttheta=theta2\n\t\t# for helical, theta1 cannot be 0.0\n\t\tif theta1 > 90.0:\n\t\t\tERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1)\n\t\tif theta1 == 0.0: theta1 =90.0\n\t\ttheta_number = int((90.0 - theta1)/theta2)\n\t\t#for helical, symmetry = s or scn\n\t\tcn = int(symmetry_string[2:])\n\t\tfor j in xrange(theta_number,-1, -1):\n\n\t\t\tif( j == 0):\n\t\t\t\tif (symmetry_string[1] ==\"c\"):\n\t\t\t\t\tif cn%2 == 0:\n\t\t\t\t\t\tk=int(359.99/cn/delta)\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\telif (symmetry_string[1] ==\"d\"):\n\t\t\t\t\tif cn%2 == 0:\n\t\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=int(359.99/4/cn/delta)\n\t\t\t\telse:\n\t\t\t\t\tERROR(\"For helical strucutre, we only support scn and sdn symmetry\",\"even_angles\",1)\n\n\t\t\telse:\n\t\t\t\tif (symmetry_string[1] ==\"c\"):\n\t\t\t\t\tk=int(359.99/cn/delta)\n\t\t\t\telif (symmetry_string[1] ==\"d\"):\n\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\t\t\t\n\t\t\tfor i in xrange(k+1):\n\t\t\t\t\tangles.append([i*delta,90.0-j*theta2,90.0])\n\n\n\telse : # This is very close to the Saff even_angles routine on the asymmetric unit;\n\t\t# the only parameters used are symmetry and delta\n\t\t# The formulae are given in the Transform Class Paper\n\t\t# The symmetric unit \t\tnVec=[]; # x,y,z triples\n\t\t# is defined by three points b,c, v of Fig 2 of the paper\n\t\t# b is (0,0,1)\n\t\t# c is (sin(thetac),0,cos(thetac))\n\t\t# a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac))\n\t\t# f is the normalized sum of all 3\n\t\t\n\t\t# The possible symmetries are in list_syms\n\t\t# The symmetry determines thetac and Omega\n\t\t# The spherical area is Omega - pi/3; \n\t\t# should be equal to 4 *pi/(3*# Faces)\n\t\t#\t\t\n\t\t# symmetry ='tet'; delta = 6;\n\n\t\tscrunch = 0.9 # closeness factor to eliminate oversampling corners\n\t\t#nVec=[] # x,y,z triples\n\n\t\tpiOver = pi/180.0\n\t\tCount=0 # used to count the number of angles\n\t\t\n\t\tif (symmetryLower[0:3] ==\"tet\"): m=3.0; fudge=0.9 # fudge is a factor used to adjust phi steps\n\t\telif (symmetryLower[0:3] ==\"oct\"): m=4.0; fudge=0.8\n\t\telif (symmetryLower[0:3] ==\"ico\"): m=5.0; fudge=0.95\n\t\telse: ERROR(\"allowable symmetries are cn, dn, tet, oct, icos\",\"even_angles\",1)\n\n\t\tn=3.0\n\t\tOmegaR = 2.0*pi/m; cosOmega= cos(OmegaR)\n\t\tEdges = 
2.0*m*n/(2.0*(m+n)-m*n)\n\t\tFaces = 2*Edges/n\n\t\tArea = 4*pi/Faces/3.0; # also equals 2*pi/3 + Omega\n\t\tcosthetac = cosOmega/(1-cosOmega)\n\t\tdeltaRad= delta*pi/180\n\t\tNumPoints = int(Area/(deltaRad*deltaRad))\n\t\tfheight = 1/sqrt(3)/ (tan(OmegaR/2.0))\n\n\t\tz0 = costhetac # initialize loop\t\n\t\tz = z0\n\t\tphi = 0\n\t\tDeltaz = (1-costhetac)/(NumPoints-1)\n\n\t\t#[1, phi,180.0*acos(z)/pi,0.]\n\t\tanglesLast = [phi,180.0*acos(z)/pi,0.]\n\t\tangles.append(anglesLast)\n\t\tnLast= [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]\n\t\tnVec = []\n\t\tnVec.append(nLast)\n\n\t\tCount +=1\n\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z0 + Deltaz*k # Is it higher than fhat or lower\n\t\t\tr= sqrt(1-z*z)\n\t\t\tif (z > fheight): phiRmax= OmegaR/2.0\n\t\t\tif (z<= fheight):\n\t\t\t\tthetaR = acos(z); \n\t\t\t\tcosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2 *cosOmega);\n\t\t\t\tphiMax = 180.0*( OmegaR - acos(cosStuff))/pi\n\t\t\tangleJump = fudge* delta/r\n\t\t\tphi = (phi + angleJump)%(phiMax)\n\t\t\tanglesNew = [phi,180.0*acos(z)/pi,0.];\n\t\t\tnNew = [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]\n\t\t\tdiffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2] ) for k in xrange(Count)] \n\t\t\tdiffMin = min(diffangleVec)\n\t\t\tif (diffMin>angleJump*piOver *scrunch):\n\t\t\t\tCount +=1\n\t\t\t\tangles.append(anglesNew)\n\t\t\t\tnVec.append(nNew)\n\t\t\t\t#[Count, phi,180*acos(z)/pi,0.]\n\t\t\tanglesLast = anglesNew\n\t\t\tnLast=nNew\n\n\t\tangles.append( [0.0, 0.0, 0.0] )\n\t\tnLast= [ 0., 0. , 1.]\n\t\tnVec.append(nLast)\n\t\tif(theta2 == 180.0): angles.append( [0.0, 180.0, 0.0] )\n\t\t\n\t\tangles.reverse()\n\t\tif(phiEqpsi == \"Minus\"):\n\t\t\tfor i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0\n\t\t#print(Count,NumPoints)\n\t\t\n#\t\tlook at the distribution\n#\t\tCount =len(angles); piOver= pi/180.0;\n#\t\tphiVec = [ angles[k][0] for k in range(Count)] ;\n#\t\tthetaVec = [ angles[k][1] for k in range(Count)] ;\n#\t\txVec = [sin(piOver * angles[k][1]) * cos(piOver * angles[k][0]) for k in range(Count) ]\n#\t\tyVec = [sin(piOver * angles[k][1])* sin(piOver * angles[k][0]) for k in range(Count) ]\n#\t\tzVec = [cos(piOver * angles[k][1]) for k in range(Count) ]\n#\t\tpylab.plot(yVec,zVec,'.'); pylab.show()\n\n\n\treturn angles", "def angle_between(i1, j1, i2, j2):\n\n dot_product = i1 * i2 + j1 * j2\n magnitude1 = np.sqrt(i1 ** 2 + j1 ** 2)\n magnitude2 = np.sqrt(i2 ** 2 + j2 ** 2)\n\n theta = np.arccos(dot_product / (magnitude1 * magnitude2))\n\n return np.rad2deg(theta).round(3)", "def match_objects(coords1,coords2,tail1=(),tail2=(),accuracy=1.):\n acc2=accuracy**2\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n if dist[i_min]<acc2:match[j]=i_min\n good=greater_equal(match,0)\n n1=compress(good,list(range(np1))) \n match=compress(good,match)\n a1=compress(good,a1)\n salida=list(a1)\n for i in range(nt1):\n if type(tail1[i][0])==type('si'):\n t=[]\n for j in n1: t.append(tail1[i][j])\n else:\n t=take(tail1[i],n1)\n salida.append(t)\n 
for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n return salida", "def vrrotvec(a,b):\n a = normalize(a)\n b = normalize(b)\n ax = normalize(np.cross(a,b))\n angle = np.arccos(np.minimum(np.dot(a,b),[1]))\n if not np.any(ax):\n absa = np.abs(a)\n mind = np.argmin(absa)\n c = np.zeros((1,3))\n c[mind] = 0\n ax = normalize(np.cross(a,c))\n r = np.concatenate((ax,angle))\n return r", "def rotate_toward(initial_vector, final_vector, other_vectors, degrees: float = 5):\n final_vector = normalize(final_vector)\n initial_vector = normalize(initial_vector)\n cos_phi = np.dot(initial_vector, final_vector)\n theta = degrees * np.pi / 180\n cos_theta = np.cos(theta)\n phi = np.arccos(cos_phi)\n if phi < theta:\n return (rotate(initial_vector, final_vector, other_vectors), True)\n cos_phi_theta = np.cos(phi - theta)\n A = np.asarray([[cos_phi, 1], [1, cos_phi]])\n B = np.asarray([cos_phi_theta, cos_theta])\n x = np.linalg.solve(A, B)\n rotated_vector = x[0] * initial_vector + x[1] * final_vector\n return (rotate(initial_vector, rotated_vector, other_vectors), False)", "def __get_angle(self, names, vecA, vecB):\n pivot = max(names, key=names.count)\n\n if names[0] != pivot: # Atoms needs to be order to pick vectors correctly\n vecA = vecA * -1\n\n if names[2] != pivot:\n vecB = vecB * -1\n\n radians = vecA.AngleTo(vecB)\n angle = 180 / math.pi * radians\n\n return angle", "def Misorien2FZ1(m1,m2,symtype='Cubic'):\n m2=np.matrix(m2)\n ops=GetSymRotMat(symtype)\n angle=6.3\n for op in ops:\n tmp=m1.dot(op.dot(m2.T))\n cosangle=0.5*(tmp.trace()-1)\n cosangle=min(0.9999999999,cosangle)\n cosangle=max(-0.99999999999,cosangle)\n newangle=np.arccos(cosangle)\n if newangle<angle:\n angle=newangle\n oRes=tmp\n return oRes,angle", "def get_theta(p1,p2):\r\n \r\n dy = p1[1] - p2[1]\r\n dx = p1[0] - p2[0]\r\n theta = atan2(dy,dx)\r\n return theta", "def test_angle_angle_equivalent(pcff):\n expected = {\n \"K\": \"5.9863\",\n \"reference\": \"6\",\n \"Theta10\": \"116.0640\",\n \"Theta20\": \"116.0640\",\n }\n\n i = \"c5\"\n j = \"cp\"\n k = \"c_1\"\n l = \"c5\" # noqa: E741\n ptype, key, form, parameters = pcff.angle_angle_parameters(i, j, k, l)\n assert ptype == \"equivalent\"\n assert key == (\"cp\", \"cp\", \"c_1\", \"cp\")\n assert parameters == expected", "def dsdt(s, t, a, P, I, G, U, lengths, masses, k1, k2):\r\n d = len(a) + 1\r\n theta = s[2:2 + d]\r\n vcm = s[2 + d:4 + d]\r\n dtheta = s[4 + d:]\r\n\r\n cth = np.cos(theta)\r\n sth = np.sin(theta)\r\n rVx = np.dot(P, -sth * dtheta)\r\n rVy = np.dot(P, cth * dtheta)\r\n Vx = rVx + vcm[0]\r\n Vy = rVy + vcm[1]\r\n\r\n Vn = -sth * Vx + cth * Vy\r\n Vt = cth * Vx + sth * Vy\r\n\r\n EL1 = np.dot((v1Mv2(-sth, G, cth) + v1Mv2(cth, G, sth)) * dtheta[None, :]\r\n + (v1Mv2(cth, G, -sth) + v1Mv2(sth, G, cth)) * dtheta[:, None], dtheta)\r\n EL3 = np.diag(I) + v1Mv2(sth, G, sth) + v1Mv2(cth, G, cth)\r\n EL2 = - k1 * np.dot((v1Mv2(-sth, P.T, -sth) + v1Mv2(cth, P.T, cth)) * lengths[None, :], Vn) \\\r\n - k1 * np.power(lengths, 3) * dtheta / 12. 
\\\r\n - k2 * \\\r\n np.dot((v1Mv2(-sth, P.T, cth) + v1Mv2(cth, P.T, sth))\r\n * lengths[None, :], Vt)\r\n ds = np.zeros_like(s)\r\n ds[:2] = vcm\r\n ds[2:2 + d] = dtheta\r\n ds[2 + d] = - \\\r\n (k1 * np.sum(-sth * Vn) + k2 * np.sum(cth * Vt)) / np.sum(masses)\r\n ds[3 + d] = - \\\r\n (k1 * np.sum(cth * Vn) + k2 * np.sum(sth * Vt)) / np.sum(masses)\r\n ds[4 + d:] = np.linalg.solve(EL3, EL1 + EL2 + np.dot(U, a))\r\n return ds", "def find_rotation_and_seed_coord(beta1, beta2, closed=0, rotation=True, method=\"DP\"):\n\n n, T = beta1.shape\n q1 = curve_to_q(beta1)[0]\n scl = 4.\n minE = 1000\n if closed == 1:\n end_idx = int(floor(T/scl))\n scl = 4\n else:\n end_idx = 0\n \n for ctr in range(0, end_idx+1):\n if closed == 1:\n beta2n = shift_f(beta2, scl*ctr)\n else:\n beta2n = beta2\n \n if rotation:\n beta2new, R = find_best_rotation(beta1, beta2n)\n else:\n beta2new = beta2n\n R = eye(n)\n q2new = curve_to_q(beta2new)[0]\n\n # Reparam\n if norm(q1-q2new,'fro') > 0.0001:\n gam = optimum_reparam_curve(q2new, q1, 0.0, method)\n gamI = uf.invertGamma(gam)\n beta2new = group_action_by_gamma_coord(beta2new,gamI)\n q2new = curve_to_q(beta2new)[0]\n if closed == 1:\n q2new = project_curve(q2new)\n else:\n gamI = linspace(0,1,T)\n \n tmp = innerprod_q2(q1,q2new)\n if tmp > 1:\n tmp = 1\n if tmp < -1:\n tmp = -1\n Ec = arccos(tmp)\n if Ec < minE:\n Rbest = R\n beta2best = beta2new\n q2best = q2new\n gamIbest = gamI\n minE = Ec\n\n return (beta2best, q2best, Rbest, gamIbest)", "def get_angles_of_triangle(a, b, c):\n\tRAD_TO_DEG = 180/math.pi\n\t\n\talpha = b-a\n\tbeta = c-a\n\tcos_alpha = alpha.dot(beta) / (alpha.size * beta.size)\n\tangle1 = math.acos(cos_alpha) * RAD_TO_DEG\n\t\n\talpha = a-b\n\tbeta = c-b\n\tcos_alpha = alpha.dot(beta) / (alpha.size * beta.size)\n\tangle2 = math.acos(cos_alpha) * RAD_TO_DEG\n\t\n\tangle3 = 180 - angle2 - angle1\n\n\t# Debug.text_2d(25, 400, \"Angle 1: \" + str(round(angle1, 2)))\n\t# Debug.text_2d(25, 430, \"Angle 2: \" + str(round(angle2, 2)))\n\t# Debug.text_2d(25, 460, \"Angle 3: \" + str(round(angle3, 2)))\n\n\t# Debug.line_2d_3d(a, b)\n\t# Debug.line_2d_3d(b, c)\n\t# Debug.line_2d_3d(c, a)\n\n\treturn [angle1, angle2, angle3]", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot" ]
[ "0.61534816", "0.6057964", "0.6041564", "0.6022013", "0.5908641", "0.5882892", "0.5800431", "0.57707256", "0.5706791", "0.56642485", "0.56599987", "0.5649005", "0.5628651", "0.5623927", "0.56015086", "0.5588204", "0.55626535", "0.5548194", "0.55238026", "0.5522772", "0.5512592", "0.5511358", "0.54814446", "0.54579234", "0.5439676", "0.54255545", "0.5425142", "0.5417542", "0.54150444", "0.5410281", "0.53786486", "0.5377843", "0.5374519", "0.5368992", "0.5359428", "0.53514725", "0.53508466", "0.5348424", "0.53371537", "0.53318906", "0.53130734", "0.5306698", "0.53044015", "0.52979076", "0.52953714", "0.5292065", "0.5286235", "0.5283285", "0.5259752", "0.52525085", "0.5248967", "0.5246041", "0.5240348", "0.5224116", "0.5222485", "0.521706", "0.5216843", "0.52148676", "0.52135086", "0.5204688", "0.5198948", "0.519848", "0.51966923", "0.519515", "0.51943445", "0.51916516", "0.51695836", "0.5168223", "0.5168129", "0.51558053", "0.5146542", "0.5145775", "0.513972", "0.513175", "0.5121612", "0.5118985", "0.51034707", "0.5100213", "0.5100197", "0.5089709", "0.50889003", "0.5086396", "0.5084258", "0.50759476", "0.50723976", "0.5067205", "0.5067205", "0.5052392", "0.5052029", "0.50503266", "0.50497466", "0.50497156", "0.50442773", "0.503867", "0.5033593", "0.5033555", "0.5033495", "0.50331324", "0.5032535", "0.5023573" ]
0.7362805
0
Retrieve pixel size from the header. We check attribute Pixel_size and also pixel size from ctf object, if it exists. If the two are different or if the pixel size is not set, return 1.0 and print a warning.
def get_pixel_size(img): p1 = img.get_attr_default("apix_x", -1.0) cc = img.get_attr_default("ctf", None) if cc == None: p2 = -1.0 else: p2 = round(cc.apix, 3) if p1 == -1.0 and p2 == -1.0: ERROR("Pixel size not set", "get_pixel_size", 0) return -1.0 elif p1 > -1.0 and p2 > -1.0: if abs(p1-p2) >= 0.001: ERROR("Conflict between pixel size in attribute and in ctf object", "get_pixel_size", 0) # pixel size is positive, so what follows omits -1 problem return max(p1, p2) else: return max(p1, p2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pixelsize(self):\n if hasattr(self, \"_pixelsize\"):\n return self._pixelsize\n\n try:\n return self.header[\"PixSize\"] # [arcsec]\n except KeyError:\n try:\n return abs(self.header[\"CDELT1\"]) * 3600 # [deg] -> [arcsec]\n except KeyError:\n return None", "def get_pixel_size(self):\n raise NotImplementedError", "def px_size(self):\n xp, yp = ct.c_float(), ct.c_float()\n\n self.lib.GetPixelSize(ct.pointer(xp), ct.pointer(yp))\n\n return (xp.value, yp.value)", "def testSize (self):\r\n \r\n perpixel = bytes_per_pixel [self.bih_vals [bih_BitCount]]\r\n width = self.bih_vals [bih_Width]\r\n height = self.bih_vals [bih_Height]\r\n expected = self.bih_vals [bih_SizeImage]\r\n\r\n # Rows always have multiples of 4 bytes\r\n \r\n padding = 3 - ((perpixel * width + 3) % 4)\r\n size = (width * perpixel + padding) * height\r\n\r\n if not size == expected:\r\n print \"Calculated size = %d (<> %d)\" % (size, expected)\r\n print \"***** File size error *****\"", "def get_image_size(self):", "def get_size(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n size = file_meta_plist['$objects'][1]['Size']\n return size\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['size']", "def size(img):\n\treturn img.size", "def frame_size(self):\n size = None\n if self.is_video():\n width = self.__dict__['width']\n height = self.__dict__['height']\n if width and height:\n try:\n size = (int(width), int(height))\n except ValueError:\n raise FFProbeError(\"None integer size %s:%s\" % (width, height))\n\n return size", "def size(self):\n if self._size and not self._pil_image:\n return self._size\n else:\n return self.pil_image.size", "def pix_size(self):\n return self._pix_size", "def getSize(self):\n outSize = float2()\n _res = self.mAPIContext.SDGraphObjectFrame_getSize(self.mHandle, ctypes.byref(outSize))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return outSize", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def __header_size(self):\n return self.SIZE_LINEUPS + self.SIZE_PLAYERS_PER_LINEUP", "def byte_size(self) -> int:\n return pixel_formats[self._dtype][3] * self._components * self.width * self.height", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def size(self):\n return self.__image.size", "def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n p1 = core.PointF(1, 1)\n tr = self.transform().inverted()[0]\n p01 = tr.map(p0)\n p11 = tr.map(p1)\n return core.PointF(p11 - p01)", "def getPixelSize(self):\n return (0.000013, 0.000013)", "def get_detector_size(self):\n sensor=self._get_sensor_info()\n return sensor.nMaxWidth,sensor.nMaxHeight", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def GetPixelSize(*args, **kwargs):\n return _gdi_.Font_GetPixelSize(*args, **kwargs)", "def size(self):\n if self._buffer is not None:\n length = SIZEOF_TAGHEADER\n if self._header.value_type == b'B':\n # TODO make sure this is right, need data that uses B to verify\n length += SIZEOF_UINT32 + (len(self._buffer))\n elif self._header.value_type in b'HZ':\n length += len(self._buffer)\n else:\n length += SIZEOF_TAG_TYPES[self._header.value_type]\n return length\n else:\n return 0", "def get_obj_size(self, name):\n\t\t# get handle\n\t\t# size of red blood 
cell\n\t\twidth = 60.35\n\t\treturn width", "def get_size(image):\n width, height = image.size\n\n return (width, height)", "def get_size(self) -> Tuple2IntType:\n return self.get_width(), self.get_height()", "def find_size(mod):\n left = right = top = bottom = 0\n\n for line in (n for n in mod if n[0] == \"fp_line\"):\n layer = [n for n in line if n[0] == \"layer\"][0]\n if layer[1] in (\"F.CrtYd\", \"B.CrtYd\"):\n start = [n for n in line if n[0] == \"start\"][0]\n end = [n for n in line if n[0] == \"end\"][0]\n for x, y in (start[1:], end[1:]):\n x = float(x)\n y = float(y)\n left = min(x, left)\n right = max(x, right)\n top = min(y, top)\n bottom = max(y, bottom)\n\n width = right - left\n height = bottom - top\n\n left -= width * border_ratio\n right += width * border_ratio\n top -= height * border_ratio\n bottom += height * border_ratio\n\n return left, right, top, bottom", "def get_size(self):", "def getWidth(self):\r\n width = 1\r\n if self.orientation == \"h\":\r\n width = self.size\r\n return width", "def size(self):\n return self._image_size", "def get_pixel_size_rec(rec, verbose=False):\n len_rec_x_pixel = 64\n len_rec_x_um = 71.5 / rec['wParamsNum'][30]\n \n rec_pixel_size = len_rec_x_um / len_rec_x_pixel\n \n if verbose:\n print(\"the real length of each pixel in this recording is: \\n{0} um\".format(rec_pixel_size))\n \n return rec_pixel_size", "def get_image_size(fname):\r\n \r\n logging.debug('get_image_size({})'.format(fname))\r\n\r\n with open(fname, 'rb') as fhandle:\r\n head = fhandle.read(24)\r\n if len(head) != 24:\r\n return\r\n if imghdr.what(fname) == 'png':\r\n check = struct.unpack('>i', head[4:8])[0]\r\n if check != 0x0d0a1a0a:\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n width, height = struct.unpack('>ii', head[16:24])\r\n elif imghdr.what(fname) == 'gif':\r\n width, height = struct.unpack('<HH', head[6:10])\r\n elif imghdr.what(fname) == 'jpeg':\r\n try:\r\n fhandle.seek(0) # Read 0xff next\r\n size = 2\r\n ftype = 0\r\n while not 0xc0 <= ftype <= 0xcf:\r\n fhandle.seek(size, 1)\r\n byte = fhandle.read(1)\r\n while ord(byte) == 0xff:\r\n byte = fhandle.read(1)\r\n ftype = ord(byte)\r\n size = struct.unpack('>H', fhandle.read(2))[0] - 2\r\n # We are at a SOFn block\r\n fhandle.seek(1, 1) # Skip `precision' byte.\r\n height, width = struct.unpack('>HH', fhandle.read(4))\r\n except Exception: #IGNORE:W0703\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n else:\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n logging.debug('get_image_size - width, height = {}, {}'.format(width, height))\r\n return width, height", "def getSize(self):\n\n return self.size", "def size(self):\n if hasattr(self, \"_size\"):\n return self._size\n else:\n return None", "def getSize(self):\r\n return self.size", "def getSize(self):\n assert False", "def _get_current_size(self, name):\n logger.debug(\"Getting size: '%s'\", name)\n if not self._previewtrain.get(name, None):\n return None\n img = self._previewtrain[name][1]\n if not img:\n return None\n logger.debug(\"Got size: (name: '%s', width: '%s', height: '%s')\",\n name, img.width(), img.height())\n return img.width(), img.height()", "def get_size(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetSize', self.handle)", "def Framesize(self):\n\t\treturn self._get_attribute('framesize')", "def __get_size(self):\n\t\treturn 4*self.version + 17", "def get_size(self):\n 
return self._surf.get_size()", "def get_size(self):\n ...", "def size(self):\n expected_exceptions = [OSError]\n\n try:\n from botocore.exceptions import ClientError\n expected_exceptions.append(ClientError)\n except ImportError:\n pass\n\n try:\n return self.image.size\n except tuple(expected_exceptions):\n return None", "def get_display_px(self):\n return self.image.size", "def bitpix_size (bitpix):\n return abs(int(bitpix))", "def getSize(self):\n return 1", "def getSize(self) -> long:\n ...", "def _size(self):\n return self._logicalSize", "def getSize(self):\n return 1", "def size(self):\n return (self.width)", "def pixelsizey(self) -> ErrorValue:\n return ErrorValue(self._data['YPixel'], self._data.setdefault('YPixelError',0.0))", "def calc_size(self):\r\n pass", "def size(self):\n return self.getattr('size')", "def get_size(self):\n return self.__size", "def hbins_size(self):\n return self.unpack_dword(0x10)", "def __get_size(self):\n return self.__size", "def test_size_from_header(self):\n\n # pylint: disable=protected-access\n\n expected = False\n actual = self.file_instance.exists()\n\n self.assertEqual(expected, actual)\n\n expected = [element for _, element in self.to_print[\"basic\"].items()]\n\n actual = Prints(\n None, None, output_file=None, only_on_file=False\n )._size_from_header(self.to_print[\"basic\"])\n\n self.assertEqual(expected, actual)", "def frame_size(self) -> tuple[int, int]:\n return (int(self.get(cv.CAP_PROP_FRAME_HEIGHT)),\n int(self.get(cv.CAP_PROP_FRAME_WIDTH)))", "def getSize(self):\n return self.size", "def __len__(self):\n return self.flat_image.size", "def get_video_size(self):\n # Get original size of video stream\n caps = self.imagesink.sinkpad.get_current_caps()\n if caps is None:\n return None\n\n # Assume these are simple caps with a single struct.\n struct = caps.get_structure(0)\n return (struct.get_int('width')[1], struct.get_int('height')[1])", "def checkSize(self):\n if self.format.maxSize and self.size > self.format.maxSize:\n return 1\n elif (self.format.minSize and\n (not self.req_grow and\n self.size < self.format.minSize) or\n (self.req_grow and self.req_max_size and\n self.req_max_size < self.format.minSize)):\n return -1\n return 0", "def get_size(self):\n result_str = subprocess.check_output([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'wm', 'size'\n ]).decode(DEFAULT_CHARSET)\n width, height = result_str.replace('\\n', '').replace('\\r', '').split(' ')[-1].split('x')\n return width, height", "def getWidth(self):\n caller = self.getMyCaller()\n if caller.startsWith(\"java.\") or caller.startsWith(\"javax.\"):\n return super(Program_Test, self).getWidth()\n else:\n return getCentralRegionSize().width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def get_image_size(frame) -> tuple:\n return tuple(frame.shape[1::-1])", "def get_size(self, valueid):", "def getsize(self):\n try :\n return self.size\n except:\n raise ReferenceError", "def size (self):\n\t\timport struct\n\t\treturn struct.calcsize (self.struct)", "def fl_get_object_size(ptr_flobject):\n _fl_get_object_size = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_object_size\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), cty.POINTER(xfdata.FL_Coord),\n cty.POINTER(xfdata.FL_Coord)], \\\n \"\"\"void fl_get_object_size(FL_OBJECT * obj, 
FL_Coord * w,\n FL_Coord * h)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_width, ptr_width = library.make_FL_Coord_and_pointer()\n i_height, ptr_height = library.make_FL_Coord_and_pointer()\n library.keep_elem_refs(ptr_flobject, i_width, i_height, ptr_width, \\\n ptr_height)\n _fl_get_object_size(ptr_flobject, ptr_width, ptr_height)\n return i_width.value, i_height.value", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def getSize(self):\n return self.__size", "def get_frame_size(self) -> Tuple[int, int]:\n return self.__sim.frame_size()", "def size(self):\n return self.unpack_dword(0x4)", "def size(self):\n return self.unpack_dword(0x4)", "def size(self):\r\n return self.info().size", "def file_size(self):\n return self.context.getObjSize(self.context)", "def header_size(self):\n return 5", "def size(self):\n return self.properties.get('size')", "def _size_pixels(self, renderer):\n return renderer.points_to_pixels(self.size)", "def getsize(datadescriptor):\n\n\tif datadescriptor[0] == 'reg':\n\t\tsize = datadescriptor[1][2]\n\telif datadescriptor[0] == 'mem':\n\t\tsize = datadescriptor[1][1]\n\telif datadescriptor[0] == 'heap':\n\t\tsize = datadescriptor[1][2]\n\telif datadescriptor[0] == 'perp':\n\t\tsize = datadescriptor[1][2]\n\telif datadescriptor[0] == 'pmem':\n\t\tsize = datadescriptor[1][2]\n\telse:\n\t\treturn (15, \"Not a supported destination type.\")\t\n\n\treturn (0, size)", "def size(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"size\")", "def size(self):\r\n return self.size.data", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def _get_size(self, bbox: BBox) -> tuple[int, int]:\n if self.size is not None:\n return self.size\n\n if self.resolution is not None:\n return bbox_to_dimensions(bbox, self.resolution)\n\n raise ValueError(\"Size or resolution for the requests should be provided!\")", "def getImageSize(language=None):", "def file_size(hdf):\n return os.path.getsize(hdf.file_name)", "def size(self):\n bbox = self.bbox\n return bbox[1] - bbox[0]", "def get_file_size(self) -> int:\n return self.get_main_information()['FileSize']", "def size(self) -> (float, float):\n\n return self.screen.get_surface().get_size()", "def hbins_size(self):\n return self.unpack_dword(0x28)" ]
[ "0.72802687", "0.68242306", "0.68130434", "0.677174", "0.6668118", "0.6537701", "0.6535796", "0.6514242", "0.6482996", "0.64115053", "0.63932556", "0.6321486", "0.6316444", "0.6295255", "0.62676024", "0.62676024", "0.62564975", "0.6233684", "0.6221311", "0.6203486", "0.61531", "0.61531", "0.6130825", "0.6117649", "0.6106248", "0.60972565", "0.6089569", "0.6078482", "0.60716933", "0.6069912", "0.6055374", "0.60546607", "0.60507476", "0.6037012", "0.602436", "0.602018", "0.60102594", "0.5996008", "0.5989069", "0.5985866", "0.5985264", "0.5978419", "0.5965733", "0.59645736", "0.59568685", "0.5954181", "0.59475714", "0.5946765", "0.5932548", "0.5932095", "0.59280527", "0.59276617", "0.59271747", "0.5913383", "0.5910055", "0.5909292", "0.5900129", "0.58988994", "0.58973604", "0.589693", "0.589222", "0.58857685", "0.58817446", "0.5879983", "0.5876611", "0.5875422", "0.5875422", "0.5875422", "0.5875422", "0.5875422", "0.5875422", "0.5875422", "0.5855958", "0.5844548", "0.58437574", "0.5843107", "0.58304644", "0.5828518", "0.5825619", "0.5825401", "0.58239114", "0.58239114", "0.5820135", "0.5817856", "0.58095807", "0.5806072", "0.580572", "0.5795704", "0.57933354", "0.57888275", "0.57875437", "0.57875437", "0.57875437", "0.57861614", "0.57840246", "0.578129", "0.5780497", "0.5773986", "0.5770599", "0.5770345" ]
0.69274837
1
Set pixel size in the header. Set attribute Pixel_size and also pixel size in ctf object, if it exists.
def set_pixel_size(img, pixel_size): nz = img.get_zsize() img.set_attr("apix_x", round(pixel_size, 3)) img.set_attr("apix_y", round(pixel_size, 3)) img.set_attr("apix_z", round(pixel_size, 3)) cc = img.get_attr_default("ctf", None) if(cc): cc.apix = pixel_size img.set_attr("ctf", cc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_pixel_size(self, pixel_size):\n raise NotImplementedError", "def set_size(self, w, h):\n\t\tpass", "def set_size(self, value='S'):\n upper = value.upper()\n\n if upper == 'M': # Medium: double height\n # size = 0x01\n # charHeight = 48\n # maxColumn = 32\n self.double_height_on()\n self.double_width_off()\n elif upper == 'L': # Large: double width and height\n # size = 0x11\n # charHeight = 48\n # maxColumn = 16\n self.double_height_on()\n self.double_width_on()\n else: # Small: standard width and height\n # size = 0x00\n # charHeight = 24\n # maxColumn = 32\n self.double_width_off()\n self.double_height_off()\n # writeBytes(ASCII_GS, '!', size)\n # prevByte = '\\n' # Setting the size adds a linefeed", "def update_size(self):\n self.size = self.image.size\n self.width, self.height = self.size", "def set_calibration(self, px: float):\n self.meta_data['SizeX'] = px\n self.meta_data['SizeY'] = px\n self.meta_data['SizeZ'] = px", "def set_size(self, size):\n \n self.width = size[0]\n self.height = size[1]", "def _set_pixel_size(self) -> None:\n # Not Pansharpened images\n if self.band_combi == Sv1BandCombination.PMS:\n # TODO: manage default resolution for PAN band ?\n self.pixel_size = self._ms_res\n # Pansharpened images\n else:\n self.pixel_size = self._pan_res", "def size(self, value):\n self.width = value\n self.height = value", "def setSize(self, value):\n _res = self.mAPIContext.SDGraphObjectFrame_setSize(self.mHandle, ctypes.byref(value))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return None", "def size(self, val):\n self.width = val\n self.height = val", "def SetPixelSize(*args, **kwargs):\n return _gdi_.Font_SetPixelSize(*args, **kwargs)", "def setsize(self, size):\n self.__size = size", "def updateSizeHead(self, size): \n self.avatarConfiguration[\"headSize\"] = size\n self.paintHead()\n self.paintHair()\n if (self.avatarConfiguration[\"mask\"]):\n self.generateMask(\"imgUpload.png\")\n self.paintMask()", "def updateSize(self, *args):\n return None", "def setDescriptorSize(self, dsize): # real signature unknown; restored from __doc__\n pass", "def set_size(self, size=None):\n if not size:\n size = self.output_size\n self.img = cv2.resize(self.img, size)\n self.update_image()\n self.update_size()", "def fl_set_object_size(ptr_flobject, width, height):\n _fl_set_object_size = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_size\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), xfdata.FL_Coord,\n xfdata.FL_Coord], \\\n \"\"\"void fl_set_object_size(FL_OBJECT * obj, FL_Coord w,\n FL_Coord h)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n library.keep_elem_refs(ptr_flobject, width, i_width, height, i_height)\n _fl_set_object_size(ptr_flobject, i_width, i_height)", "def size(self, value):\n self.width = value", "def calc_size(self):\r\n self.height = HEIGHT_STATUS", "def size(self, size):\n self.width = size\n self.height = size", "def size(self, size):\n self.width = size\n self.height = size", "def set_size(self, size):\n self.dtSize = size", "def size(self, size):\n self._size = size", "def changeElemProp(self, styleObj):\n if self.specified:\n propName = 'CharHeight%s' % self.propSuffix\n logger.debug(\"set %s %1.1f\", propName, self.size)\n styleObj.setPropertyValue(propName, self.size)", "def set_file_size(self, 
file_path, row):\n row[_column_name] = os.path.getsize(file_path)", "def setFrameSize(self, frame_size):\n \n self.frame_size = frame_size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def __header_size(self):\n return self.SIZE_LINEUPS + self.SIZE_PLAYERS_PER_LINEUP", "async def size(\n self, *, header: Optional[headers.RequestHeader] = None\n ) -> SizeResponse:\n\n request = SizeRequest()\n if header is not None:\n request.header = header\n\n return await self._unary_unary(\n \"/atomix.set.SetService/Size\", request, SizeResponse,\n )", "def setPixelsPerInch(self,value):\n self.PDFreactorConfiguration.in1[\"pixelsPerInch\"] = value", "def svn_info_t_size_set(svn_info_t_self, apr_size_t_size): # real signature unknown; restored from __doc__\n pass", "def __set_size(self, size):\n if not isinstance(size, int):\n raise TypeError('The size should be an integer')\n if size < 64 or size > 1500: # It should be in the Standard Ethernet Payload range\n raise ValueError('The size should be in the range of Standard Ethernet frames [64,1500] bytes')\n self.__size = size", "def pixelsize(self):\n if hasattr(self, \"_pixelsize\"):\n return self._pixelsize\n\n try:\n return self.header[\"PixSize\"] # [arcsec]\n except KeyError:\n try:\n return abs(self.header[\"CDELT1\"]) * 3600 # [deg] -> [arcsec]\n except KeyError:\n return None", "def updateHeaderSize( self, nNewDataSize ):\n self.nDataSize = int( nNewDataSize )\n self.nNbrSample = int( self.nDataSize * 8 / self.nNbrChannel / self.nNbrBitsPerSample )\n self.rDuration = self.nDataSize / float( self.nAvgBytesPerSec )", "def calc_size(self):\r\n pass", "def size(self,*args):\n if len(args) == 2:\n x,y = map(int,args)\n else:\n x,y = map(int,args[0])\n self.check_size(x,y)\n self.data['chs'] = '%dx%d'%(x,y)", "def size(self, size={}):\n # type: (dict) -> Entity\n if not size:\n return\n for s in ['width', 'height']:\n if s in size:\n self.type_def[s] = int(size[s])\n\n return self", "def set_frame_size(self, frame_size_selector):\n raise NotImplementedError", "def SetSize(*args, **kwargs):\n return _gdi_.Bitmap_SetSize(*args, **kwargs)", "def SetUniformBitmapSize(self, size):\r\n\r\n self._requested_bmp_size = wx.Size(*size)\r\n\r\n # if window is already initialized, recalculate the tab height\r\n if self._dummy_wnd:\r\n self.UpdateTabCtrlHeight()", "def setPixelsPerInchShrinkToFit(self,value):\n self.PDFreactorConfiguration.in1[\"pixelsPerInchShrinkToFit\"] = value", "def setImageSize(cls, width, height):\n\t\tcls._width = width\n\t\tcls._height = height", "def header_size(self):\n return 5", "def setSize_0(self, size):\n self.setSize(size.getWidth(), size.getHeight())", "def setFilmSize(self, size_x, size_y):\n self.lens.setFilmSize(size_x, size_y)\n self.rebuildMatrixCache()", "def set_2d_size(self, w=None, h=None, x=0, y=0):\r\n from pi3d.Display import Display\r\n if w == None:\r\n w = Display.INSTANCE.width\r\n if h == None:\r\n h = Display.INSTANCE.height\r\n self.unif[42:44] = [x, y]\r\n self.unif[45:48] = [w, h, Display.INSTANCE.height]", "def testSize (self):\r\n \r\n perpixel = bytes_per_pixel [self.bih_vals [bih_BitCount]]\r\n width = self.bih_vals [bih_Width]\r\n height = self.bih_vals [bih_Height]\r\n expected = self.bih_vals [bih_SizeImage]\r\n\r\n # Rows always have multiples of 4 bytes\r\n \r\n padding = 3 - ((perpixel * width + 3) % 4)\r\n size = (width * perpixel + padding) * height\r\n\r\n 
if not size == expected:\r\n print \"Calculated size = %d (<> %d)\" % (size, expected)\r\n print \"***** File size error *****\"", "def update_size(self, target, new_size): \n # check the validity of the new size\n if new_size.isdigit():\n # it is an integer so that is good\n if int(new_size) >= 0:\n # it is also positive so determine which property is to \n # be updated then perform that update\n if target == 'checksum_size':\n self.chksum_size = int(new_size)\n \n elif target == 'name_size':\n self.name_size = int(new_size)\n \n elif target == 'wflag_size':\n self.wflag_size = int(new_size) \n \n elif target == 'ascii_size':\n self.ascii_size = int(new_size) \n \n elif target == 'time_size':\n self.time_size = int(new_size) \n \n elif target == 'length_size':\n self.length_size = int(new_size) \n \n else:\n # this is not a valid property to update\n self.error_log.append('*** ' + str(target) + 'found '\\\n ' in xml file is not a valid '\\\n 'default to update ***')\n # end if\n \n else:\n self.error_log.append('*** Invalid ' + str(target) +\n ' in xml file ***') \n # end if\n \n else:\n self.error_log.append('*** Invalid ' + str(target) + \n ' in xml file ***')\n # end if ", "def set_style_size_options(f, description, variable_label, node_label):\n f.write(\" #description { font-size : \" + description + \"px; }\\n\")\n f.write(\" .domain_label { font-size : \" + variable_label + \"px; }\\n\")\n f.write(\" .sideLabels, .nodeLabel, .nodeName, .edgeLabels { font-size : \" + node_label + \"px; }\\n\")\n f.write(\"</style>\\n\")", "def set_dims_in_hdr(hdr, startx, starty, cols, rows):\n hdr['startX'] = (startx, 'Starting CCD pixel column')\n hdr['endX'] = (startx + cols, 'Ending CCD pixel column+1')\n hdr['startY'] = (starty, 'Starting CCD pixel row')\n hdr['endY'] = (starty + rows, 'Ending CCD pixel row+1')", "def OnSize(self, event):\r\n\r\n self.DoHeaderLayout()", "def set_size(self, width, height):\r\n \r\n self.image = pygame.transform.scale(self.image, (width, height))\r\n self.rect = self.image.get_rect()", "def set_point_size(self, point_size=0.0):\r\n for b in self.buf:\r\n b.unib[8] = point_size", "def _assign_sizes(self):", "def actual_size(self, size, mode='normal', state='on'):\n raise NotImplementedError", "def setSize(self, y, h):\n if (h <= 0.0):\n self.ovflRect.hide()\n self.canvas.setHeight(y)\n else:\n self.ovflRect.setRect(0, y, self.mainWidth, h)\n self.ovflRect.show()\n self.canvas.setHeight(y + h)", "def SetElementSize(self, element_id, size):\r\n \r\n if element_id == AUI_TBART_SEPARATOR_SIZE:\r\n self._separator_size = size\r\n elif element_id == AUI_TBART_GRIPPER_SIZE:\r\n self._gripper_size = size\r\n elif element_id == AUI_TBART_OVERFLOW_SIZE:\r\n self._overflow_size = size", "def calc_size(self):\r\n self.height = self.termheight - HEIGHT_CON - HEIGHT_STATUS\r\n self.posy = HEIGHT_STATUS\r\n self.width = WIDTH_ORDERBOOK", "def updateHeaderSizeFromDataLength( self ):\n self.updateHeaderSize( int( len( self.data ) * self.nNbrBitsPerSample / 8 ) )", "def add_size_fig(cls, quad, obj_temp):\n\n\t\ttype = abs(quad.result) // 1000 # integer division\n\t\tif not cls.fig_can_add_size(type):\n\t\t\tError.wrong_attribute_for_figure_execution(type, \"size\")\n\n\t\tsize = cls.get_address_value(quad.right_operand)\n\t\tobj_temp.setSize(size)", "def size(self, new_size):\n if type(new_size) is str:\n new_size = new_size.replace(\" \", \"\").upper()\n new_size = new_size.replace(\")\", \"\")\n new_size = new_size.replace(\"(\", \"\")\n new_size = new_size.replace(\",\", 
\".\")\n new_size = new_size.replace(\"B\", \"\").strip()\n target_unit = None\n multiplier = 1\n is_bytes = False\n try:\n float(new_size)\n target_unit = \"B\"\n is_bytes = True\n except Exception as e:\n pass\n\n if not is_bytes:\n multiplier *= 1024\n for unit in [\"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\", \"Y\"]:\n if not target_unit and unit in new_size:\n target_unit = unit\n multiplier *= 1024\n # Reject double units\n elif target_unit and unit in new_size:\n target_unit = None\n break\n\n if target_unit:\n new_size = new_size.replace(target_unit, \"\").strip()\n try:\n self._size = int(float(new_size) * multiplier)\n except Exception as e:\n logger.error(f\"Failed to set a size from \\\"{new_size}\\\"\")\n logger.error(e)\n\n elif type(new_size) is int:\n self._size = new_size\n\n else:\n raise Exception(\"Wrong size type provided ({type(new_size)})\")\n\n if not self._size:\n logger.warn(f\"Failed to set a size from \\\"{new_size}\\\"\")", "def get_pixel_size(self):\n raise NotImplementedError", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def size(self, size: int):\n\n self._size = size", "def change_size(self, width, height):\n oldw = float(self.size().width())\n oldh = float(self.size().height())\n\n if self.indicator_type == 'session':\n neww = int(oldw + oldw * (width / 100.0))\n if neww > 0:\n self.setFixedSize(neww, oldh)\n elif self.indicator_type == 'unit':\n newh = int(oldh + oldh * (height / 100.0))\n if newh > 0:\n self.setFixedSize(oldw, newh)\n\n self.set_font_size()", "def get_image_size(self):", "def set_pointsize(self, pointsize):\n\tself.m_pointsize = pointsize", "def setoutputsize(self, size, column=None):\n pass", "def size(self, width, height):\n self._p('[size] {} {}'.format(width, height))", "def calc_size(self):\r\n self.height = HEIGHT_CON\r\n self.posy = self.termheight - self.height", "def set_size(self, width, height):\n cairo.cairo_xcb_surface_set_size(self._pointer, width, height)\n self._check_status()", "def SetImageSize(self,x=IS.GET_IMAGE_SIZE_X_MAX,y=0):#non-zero ret\r\n r = CALL(\"SetImageSize\",self,INT(x),INT(y))\r\n if x & 0x8000 == 0x8000:\r\n return self.CheckForNoSuccessError(r)\r\n return self.CheckForSuccessError(r)", "def changeSize(self, value):\n self.layer.brush_size = value", "def get_obj_size(self, name):\n\t\t# get handle\n\t\t# size of red blood cell\n\t\twidth = 60.35\n\t\treturn width", "def set_pic_size(self, im_name):\n im_vals = np.genfromtxt(im_name, delimiter=self.delim)\n self.pic_width = int(np.size(im_vals[0]) - 1) # the first column of ASCII image is row number\n try: self.pic_height = int(np.size(im_vals[:,0])) \n except IndexError: \n self.pic_width = int(np.size(im_vals) - 1)\n self.pic_height = 1\n self.create_rect_mask()\n return self.pic_width, self.pic_height", "def updateSizeBody(self, size): \n self.avatarConfiguration[\"bodySize\"] = size\n self.paintBody()\n self.paintShoes()\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n self.paintShirt()\n self.paintTrousers()\n else:\n self.paintSkirt()", "def svn_info_t_size64_set(svn_info_t_self, svn_filesize_t_size64): # real signature unknown; restored from __doc__\n pass", "def setDataSize(self, head,payload,eop):\n self.dataSize = len(head)+len(payload)+len(eop)", "def calc_size(self):\r\n self.posx = WIDTH_ORDERBOOK\r\n self.posy = HEIGHT_STATUS\r\n self.width = self.termwidth - WIDTH_ORDERBOOK\r\n self.height = 
self.termheight - HEIGHT_CON - HEIGHT_STATUS", "def size(self, size_input: Tuple[str, str]):\n self.isize = [UIMetric.parse(size_input[0]),\n UIMetric.parse(size_input[1])]", "def set_dimensions_for_frame(self):\n if not self.h or not self.w:\n (self.h, self.w) = self.frame.shape[:2]", "def ior_param_update(self, oclass, sizes):\n self.ior_cmd.block_size.update(sizes[1])\n self.ior_cmd.transfer_size.update(sizes[2])\n self.ior_cmd.dfs_oclass.update(oclass[0])\n self.ior_cmd.dfs_dir_oclass.update(oclass[0])\n self.ior_cmd.dfs_chunk.update(sizes[0])", "def setframesize(trafficItemName, frameSize):\n queryData = {'from': '/traffic',\n 'nodes': [{'node': 'trafficItem', 'properties': ['name'],\n 'where': [{'property': 'name', 'regex': trafficItemName}]},\n {'node': 'configElement', 'properties': [], 'where': []}]}\n queryResponse = middleware.ixn.query(data=queryData)\n if queryResponse.json()['result'][0]['trafficItem'] == []:\n print('\\nNo such Traffic Item name found: %s' % trafficItemName)\n return\n configElementObj = queryResponse.json()['result'][0]['trafficItem'][0]['configElement'][0]['href']\n middleware.trafficObj.configTrafficItem(mode='modify', obj=configElementObj, configElements={'frameSize': frameSize})", "def test_sizesetter(self):\n Rectangle.reset_objects()\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = 100\n self.assertEqual(r1.size, 100)", "def SetToolBitmapSize(self, size):\r\n\r\n # TODO: wx.ToolBar compatibility\r\n pass", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def get_size(self):\n\t\tpath =os.path.join(self.path, self.init_str)\n\t\ttry:\n\t\t\tself.size = os.path.getsize(path)\n\t\texcept :\n\t\t\tself.size = 0", "def set_frame_size(*args):\n return _ida_frame.set_frame_size(*args)", "def pix_size(self):\n return self._pix_size", "def set_canvas_size(self, width_npix, height_npix):\n\n self.variables.canvas_width = width_npix\n self.variables.canvas_height = height_npix\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.canvas_nx = width_npix\n self.variables.canvas_image_object.canvas_ny = height_npix\n self.config(width=width_npix, height=height_npix)", "def set_data_size(self, num_bytes):\n self.model.data_size = num_bytes\n self.refresh_memory()", "def updateHeaderComputedValues( self ):\n self.nAvgBytesPerSec = int( self.nNbrChannel*self.nSamplingRate*self.nNbrBitsPerSample/8 )\n self.nSizeBlockAlign = int( self.nNbrChannel*self.nNbrBitsPerSample/8 )\n self.dataType = Wav.getDataType( self.nNbrBitsPerSample )", "def set_pixel_width(self, width):\n # set in um\n self._dll.ShamrockSetPixelWidth(self._device, c_float(width))", "def SetSize(self, m: 'unsigned int', n: 'unsigned int') -> \"void\":\n return _itkArray2DPython.itkArray2DUI_SetSize(self, m, n)", "def _save_size(self):\n if self.width_key is not None:\n (width, height) = self.window.get_size()\n config.set(self.width_key, width)\n config.set(self.height_key, height)\n config.save()", "def fl_set_form_size(ptr_flform, width, height):\n _fl_set_form_size = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_form_size\", \\\n None, [cty.POINTER(xfdata.FL_FORM), xfdata.FL_Coord,\n xfdata.FL_Coord], \\\n \"\"\"void fl_set_form_size(FL_FORM * form, FL_Coord w, FL_Coord h)\"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n library.keep_elem_refs(ptr_flform, width, i_width, height, 
i_height)\n _fl_set_form_size(ptr_flform, i_width, i_height)" ]
[ "0.6878347", "0.6856726", "0.6558465", "0.6309463", "0.6261243", "0.6204648", "0.61168814", "0.6115968", "0.61134034", "0.60657334", "0.60597295", "0.60385007", "0.60327333", "0.6032628", "0.6013194", "0.59783244", "0.5973719", "0.5945691", "0.5938708", "0.59118575", "0.59118575", "0.5872305", "0.5813494", "0.57553405", "0.5741893", "0.5719532", "0.5699102", "0.5699102", "0.5699102", "0.5699102", "0.5652728", "0.5648464", "0.56458235", "0.5631856", "0.5626023", "0.56214863", "0.5611817", "0.5608473", "0.5602085", "0.55573493", "0.5551673", "0.5549055", "0.5545766", "0.5536038", "0.5528835", "0.5515291", "0.5514481", "0.55111986", "0.55043644", "0.5486529", "0.5483955", "0.5483121", "0.54554915", "0.54507893", "0.5447467", "0.54460466", "0.5424867", "0.54068315", "0.5380729", "0.5374828", "0.5361523", "0.53578687", "0.53577983", "0.5351647", "0.5350738", "0.53506047", "0.53501093", "0.5349152", "0.533576", "0.5328392", "0.5327609", "0.53252393", "0.53210336", "0.53114533", "0.53086877", "0.52997446", "0.5295981", "0.5291525", "0.5271539", "0.5269756", "0.52687365", "0.52686745", "0.52514434", "0.52382594", "0.52376515", "0.52371025", "0.5219713", "0.5194268", "0.51903504", "0.51903504", "0.5182451", "0.5180674", "0.5176908", "0.51724106", "0.5162056", "0.5157507", "0.5155378", "0.51541036", "0.5145876", "0.51441485" ]
0.67625564
2
For an input directory, create a dictionary mapping output names to input ROOT files.
def build_groupings(idir: str) -> dict: bkg_group = {key: [ifile for ifile in glob(f'{idir}/*_{key}_*.root')] for key in bkgs} pw_group = {key: [ifile for ifile in glob(f'{idir}/{key}*.root')] for key in powhegs} wh_pw_group = [ifile for name in wh_powhegs for ifile in glob(f'{idir}/{name}*.root')] ungrouped = [ifile for ifile in glob(f'{idir}/*.root') if 'madgraph' in ifile or 'JHU' in ifile] group = {} for key, files in bkg_group.items(): if len(files) > 0: group[key] = files for key, files in pw_group.items(): if len(files) > 0: group[key] = files for ifile in ungrouped: name = ifile.split('/')[-1].replace('.root', '') name = name.split('_SYST')[0].replace('-', '_') name = name.replace('_ggH125', '').replace('_VBF125', '').replace('_WH125', '').replace('_ZH125', '') group[name] = [ifile] if len(wh_pw_group) > 0: group['wh125_powheg'] = wh_pw_group return group
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _output_paths(outputs: Dict[str, str]) -> Dict[str, Path]:\n out_paths = dict()\n for name in outputs:\n target_dir = _BASE_PATH / name\n target_dir.mkdir()\n out_paths[name] = target_dir / 'data.json'\n return out_paths", "def treat(input, output):\n files = find(input)\n acc = []\n for file in files:\n fileInfo = extract(file)\n out = makeOutputPath(output, fileInfo[\"path\"], fileInfo[\"filename\"])\n if not out == None:\n fileInfo[\"outPath\"] = out\n acc += [fileInfo]\n return acc", "def incoming_paths(root_dir, parent_dir):\n return {\n 'F1' : os.path.join(root_dir, \"F1\"),\n 'F' : os.path.join(parent_dir, \"F\"),\n 'F2' : os.path.join(parent_dir, \"F2-in\"),\n 'D1' : os.path.join(root_dir, \"D1\"),\n 'D' : os.path.join(parent_dir, \"D\"),\n 'D2' : os.path.join(parent_dir, \"D2-in\"),\n }", "def build_filelist(input_dir: str, syst: bool = False) -> dict:\n filedict = {\n idir.split('SYST_')[-1].split('/')[0]: {}\n for idir in glob('{}/*'.format(input_dir)) if 'SYST_' in idir\n }\n\n filedict['nominal'] = build_groupings(f'{input_dir}/NOMINAL')\n if syst:\n for idir in filedict.keys():\n if idir == 'nominal':\n continue\n elif 'Rivet' in idir:\n continue\n filedict[idir] = build_groupings(f'{input_dir}/SYST_{idir}')\n else:\n filedict = {'nominal': filedict['nominal']}\n\n pprint(filedict, width=150)\n return filedict", "def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict", "def create_new_directories_for_filter_reads(in_dir, out_dir):\n for sub_in_dir in get_all_sub_directories(in_dir):\n # make dir if it doesnt exist\n sub_out_dir = os.path.join(out_dir, os.path.basename(sub_in_dir))\n if not os.path.isdir(sub_out_dir):\n os.mkdir(sub_out_dir)\n yield sub_in_dir, sub_out_dir", "def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths", "def create_input_files(self, datasets_dict):\n ifname = self.keywords['inputfile']\n dirstem = os.path.dirname(ifname)\n basename = os.path.basename(ifname).split('.')[0]\n createdfiles=list()\n if dirstem == \"\":\n dirstem = os.getcwd()\n dkeys = datasets_dict.keys()\n dkeys.sort()\n dct=1\n for didx in dkeys:\n newfile = MASTFile()\n newfile.data = list(datasets_dict[didx])\n newname=\"%s/loop_%s_%s.inp\" % (dirstem, basename, str(dct).zfill(2))\n newfile.to_file(newname)\n #createdfiles.append(os.path.basename(newname))\n createdfiles.append(newname)\n dct=dct+1\n return createdfiles", "def get_files(input_dir):\n file_rep = { \"tars\" : [] }\n \n files = os.listdir(input_dir)\n \n the_file, the_date = find_bootstrap(files)\n \n #add index file in file_rep\n file_rep['index'] = the_file\n file_rep['date'] = the_date\n \n pattern = \"ncep_forecast_%s_(?P<name>\\S+).tar\" % (the_date)\n \n the_re = re.compile(pattern)\n\n for the_file in files:\n matched = the_re.match(the_file)\n if matched:\n 
print(\"matched %s\" % (matched.group(\"name\")))\n file_rep['tars'].append(the_file)\n \n return file_rep", "def map_files(key):\n \n datadir=os.path.join(os.path.dirname(__file__),'ncnr_sample_data')\n filedict={'empty_1m':os.path.join(datadir,'SILIC001.SA3_SRK_S101'),\n 'empty_4m':os.path.join(datadir,'SILIC002.SA3_SRK_S102'),\n 'empty_cell_1m':os.path.join(datadir,'SILIC003.SA3_SRK_S103'),\n 'blocked_1m':os.path.join(datadir,'SILIC004.SA3_SRK_S104'),\n 'trans_empty_cell_4m':os.path.join(datadir,'SILIC005.SA3_SRK_S105'),\n 'trans_sample_4m':os.path.join(datadir,'SILIC006.SA3_SRK_S106'),\n 'blocked_4m':os.path.join(datadir,'SILIC007.SA3_SRK_S107'),\n 'empty_cell_4m':os.path.join(datadir,'SILIC008.SA3_SRK_S108'),\n 'sample_1m':os.path.join(datadir,'SILIC009.SA3_SRK_S109'),\n 'sample_4m':os.path.join(datadir,'SILIC010.SA3_SRK_S110'),\n 'mask':os.path.join(datadir,'DEFAULT.MASK'),\n 'div':os.path.join(datadir,'PLEX_2NOV2007_NG3.DIV'),\n }\n return filedict[key]", "def make_files(dir_in, dir_out):\n try:\n listaFisiere = os.listdir(f\"{dir_in}\")\n except Exception as eroare:\n print(\"Path to input file is invalid, exiting...\")\n quit()\n if not os.path.exists(f\"{dir_out}\"):\n os.mkdir(f\"{dir_out}\")\n paths_out = []\n for numeFisier in listaFisiere:\n numeFisierOutput=\"output_\"+numeFisier\n f=open(f\"{dir_out}/\"+numeFisierOutput,\"w\")\n paths_out.append(f\"{dir_out}/\"+numeFisierOutput)\n f.close()\n for i in range(len(listaFisiere)):\n listaFisiere[i] = dir_in + \"/\" + listaFisiere[i]\n return listaFisiere, paths_out", "def catalog_files(directory):\n catalog = {}\n for dirpath, filename, files in os.walk(directory):\n catalog[dirpath] = files\n for dirpath, files in catalog.items():\n matched_files = recognize_files(files)\n catalog[dirpath] = matched_files\n return catalog", "def makeIOforTest(path, inFileNames, outFileNames):\n \n test_in = []\n test_out = []\n\n for (dirpath, _, filenames) in os.walk(path):\n if 'tc-' in dirpath:\n files_in = {}\n files_out = {}\n\n for file in inFileNames:\n files_in[file] = fileContentsToStr(os.path.join(dirpath,file))\n for file in outFileNames:\n files_out[file] = fileContentsToStr(os.path.join(dirpath,file)) \n\n test_in.append(files_in)\n test_out.append(files_out)\n \n return {'in_params': test_in,\n 'out_params': test_out}", "def merge(indir, outdir, pname=''):\n\n nnew = 0\n nadd = 0\n\n LOG.debug(pname + '/' + indir.GetName())\n\n # Collect all existing keys in outdir into a dict to avoid calling outdir.Get() (slow)\n # Pick up only the latest versions (highest key cycle number)\n outkeys = {}\n for key in outdir.GetListOfKeys():\n name = key.GetName()\n try:\n if outkeys[name].GetCycle() > key.GetCycle():\n continue\n except KeyError:\n pass\n\n outkeys[name] = key\n\n # Convert the dict of keys into a dict of histograms\n outcont = dict((name, key.ReadObj()) for name, key in outkeys.iteritems())\n\n # Repeat for the input keys\n inkeys = {}\n for key in indir.GetListOfKeys():\n name = key.GetName()\n try:\n if inkeys[name].GetCycle() > key.GetCycle():\n continue\n except KeyError:\n pass\n\n inkeys[name] = key\n\n # Read input objects and write to output\n for name, key in inkeys.iteritems():\n obj = key.ReadObj()\n\n if obj.IsA() is tdirectoryfile:\n # If the input object is a directory, recurse\n\n try:\n outsubdir = outcont[name]\n except KeyError:\n outsubdir = outdir.mkdir(name)\n\n nsubnew, nsubadd = merge(obj, outsubdir, pname + '/' + indir.GetName())\n nnew += nsubnew\n nadd += nsubadd\n\n else:\n # Write to outdir or add 
to an existing histogram\n\n outdir.cd()\n try:\n outhist = outcont[name]\n except KeyError:\n obj.SetDirectory(outdir)\n obj.Write()\n nnew += 1\n else:\n outhist.Add(obj)\n outhist.Write(name)\n outhist.Delete()\n nadd += 1\n\n # Delete the object from memory immediately\n # Using TObject::Delete because TDirectory::Delete does different things\n ROOT.TObject.Delete(obj)\n\n return nnew, nadd", "def do_2004(in_dir, out_dir):\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{full_path} -> {out_dir}/{idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)", "def generate_input_files(elevation_folder_path, template_input_file_path):\n import pathlib\n json_dict = get_inputs_from_file(template_input_file_path)\n\n path_to_match = pathlib.Path(elevation_folder_path)\n\n for heightfile in path_to_match.glob(\"*.npy\"):\n dot_index = str(heightfile).rfind('.')\n filename_base = str(heightfile)[:dot_index]\n opt_output_filename = filename_base + \".out\"\n opt_input_filename = filename_base + \".json\"\n\n localdict = json_dict.copy()\n\n localdict[\"output_file\"] = opt_output_filename\n localdict[\"elevation_file\"] = str(heightfile)\n\n dump_json_dict(out_dict=localdict, filename=opt_input_filename)", "def get_dir(root_dir):\n\n dir_dict = {}\n\n for item in os.scandir(root_dir):\n item_type = \"\"\n\n if item.is_file():\n item_type = \"[FILE]\"\n elif item.is_dir():\n item_type = \"[DIR]\"\n\n dir_dict[item.name] = item_type\n\n return dir_dict", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module 
== \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def collect_seq_to_file(image_dirs, prefix, suffix):\n seq_to_file = {}\n for image_dir in image_dirs:\n seq_to_file.update(get_seq_to_file(image_dir, prefix, suffix))\n return seq_to_file", "def scan(root: str, config: dict) -> dict:\n\n extensions = config['extensions']\n exceptions = config['exceptions']\n\n # get list of directories, with their contents (files and sub-directories)\n dirs_by_path = {\n get_path_from_common_root(dir_path, root): (\n dir_path,\n dir_names,\n [name for name in file_names if is_valid_file_name(name, config)]\n )\n for dir_path, dir_names, file_names in os.walk(root)\n }\n\n dirs_by_path = {key: value for key, value in dirs_by_path.items() if value[1] or value[2]}\n\n # get list of file paths\n file_path_list = [\n (file_dir, file_name)\n for file_dir, _, file_names in dirs_by_path.values()\n for file_name in file_names\n ]\n\n files_by_path = {}\n # todo: put below loop contents into function, used in above list comprehension\n for file_dir, file_name_with_extension in file_path_list:\n # todo: remove code file definition from here to allow this to be reused for non-code related files\n # todo: refactor to use inheritable File class instead of named tuple, to be reused for non-code files\n\n if file_name_with_extension in exceptions:\n continue\n\n file = CodeFile(\n imports=[],\n exports=[],\n local_params=[],\n blocks=[],\n dir=file_dir,\n name=file_name_with_extension.rsplit('.', 1)[0],\n extension=file_name_with_extension.rsplit('.', 1)[1]\n )\n\n parse_file(file, config)\n file_no_dupes = remove_duplicate_entries(file)\n\n dir_key = get_path_from_common_root(file_dir, root)\n if dir_key not in files_by_path:\n 
files_by_path[dir_key] = []\n\n files_by_path[dir_key].append(file_no_dupes)\n\n return {'dirs_by_path': dirs_by_path, 'files_by_path': files_by_path,\n 'starting_point': get_path_from_common_root(root, root)}", "def build_out_path(d):\n assert \"output_dir\" in d, \"Dictionary must have output dir\"\n\n path = d.pop(\"output_dir\")\n\n weights_path = path + \"weights_\"\n log_path = path + \"log_\"\n train_data_path = path + \"train_data_\"\n\n # Sort dictionary for consistency\n odict = OrderedDict(sorted(d.items(), key=lambda t: t[0]))\n\n for key, val in zip(odict.keys(), odict.values()):\n if key not in (\"input_dir\", \"loss_type\", \"typical_epochs\", \"num_epochs\"):\n text = str(key) + \"_\" + str(val) + \"_\"\n weights_path += text\n log_path += text\n train_data_path += text\n\n weights_path += \".pth\"\n log_path += \".log\"\n train_data_path += \".pkl\"\n\n return weights_path, log_path, train_data_path", "def do_2003(in_dir, out_dir):\n\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{item} -> {idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)", "def _download_inputs(inputs: Dict[str, str]) -> Dict[str, Path]:\n in_paths = dict()\n for name, endpoint in inputs.items():\n target_dir = _BASE_PATH / name\n target_dir.mkdir()\n target_file = target_dir / 'data.json'\n r = requests.get('{}/data.json'.format(endpoint))\n with target_file.open('wb') as f:\n f.write(r.content)\n in_paths[name] = target_file\n return in_paths", "def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)", "def _collect_entries(rootdir: str, basedir: str):\n\n files = []\n dirs = []\n\n for entry in os.listdir(os.path.join(rootdir, basedir)):\n rel_path = 
os.path.join(basedir, entry)\n full_path = os.path.join(rootdir, rel_path)\n isdir = os.path.isdir(full_path)\n if isdir and (rel_path in ('./.git', './.pytest_cache') or entry == '__pycache__'):\n continue\n\n st = os.stat(full_path, follow_symlinks=False)\n\n (dirs if isdir else files).append((rel_path, dict(isdir=isdir, path=rel_path, size=(0 if isdir else st.st_size),\n mode=st.st_mode, omode=f'{st.st_mode:04o}',\n mtime=int(st.st_mtime))))\n\n for rel_path, entry in sorted(dirs):\n yield entry\n yield from _collect_entries(rootdir, rel_path)\n\n for _, entry in sorted(files):\n yield entry", "def gen_folders(rho, kappa, km, pa, analysis, dbase, analysisdbase):\n \n path1 = 'density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa)\n path2 = analysis + '_density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa) + '.txt' \n datafolder = dbase + path1 + '/'\n analysisfile = analysisdbase + path2 \n\n return datafolder, analysisfile", "def translate_files(root_directory, output_file):\n # type: ()\n\n root_depth = len(root_directory.split(\"/\"))\n\n dirs = [d for d in os.walk(root_directory) if d[0].count(\"/\") == root_depth]\n\n sections = get_sections_data(dirs, root_depth=root_depth)\n\n # generate subsection headers\n for section, subsections in sections.iteritems():\n keys = sorted(subsections.keys(), key=lambda x: subsections[x][0])\n\n text = \"\"\n for subsection in keys:\n text += \"\\n\\\\subsection{\" + subsection + \"}\\n\"\n text += subsections[subsection][1]\n\n sections[section] = text\n\n # generate section headers\n result = \"\"\n for section, section_data in sections.iteritems():\n result += \"\\\\section{\" + section + \"}\\n\" + section_data + \"\\n\\n\\n\"\n\n # write output to file\n with open(output_file, \"w\") as test:\n with open(\"maple2latex/out/primer\") as primer:\n test.write(primer.read() + result + \"\\n\\\\end{document}\\n\")", "def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'", "def getpaths_fromdir(input_prefix_, directory_):\n path = os.path.join(input_prefix_, \"%s*\" % directory_, \"*\")\n return [tuple([directory_, path])]", "def create_file_structure(path, folders_names):\n mapping = {}\n if not os.path.exists(path):\n os.mkdir(path)\n for name in folders_names:\n dir_ = os.path.join(path, name)\n if not os.path.exists(dir_):\n os.mkdir(dir_)\n mapping[name] = dir_\n return mapping", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def collect_output(workdir_path, outputs):\n output_dict = {}\n for output_parameter in outputs:\n if 'id' not in output_parameter:\n exit_validation(\"Error: output without id member\")\n if 'type' not in 
output_parameter:\n exit_validation(\"Error: output without type member\")\n if output_parameter['type'] != 'File':\n exit_system_error(\"Sorry, I only know about File outputs\")\n if 'outputBinding' in output_parameter:\n binding = output_parameter['outputBinding']\n paths = []\n if 'glob' in binding:\n paths = glob.glob(os.path.join(workdir_path, binding['glob']))\n log(\"Paths after globbing: \" + str(paths))\n if paths != []:\n output_dict[output_parameter['id']] = {\n 'class': 'File',\n 'location': 'file:///' + paths[0]\n }\n return output_dict", "def createPathY(inputfile):\n dic = OrderedDict()\n with open(inputfile) as fin:\n for n, line in enumerate(fin, start=1):\n arr = line.strip().split()\n path, lbls = arr[0], arr[1:]\n dic[path] = lbls\n if n == 1:\n home = dirname(path)\n return n, home, dic", "def create_directory_list(root_dir: str):\n if not os.path.exists(root_dir):\n raise FileNotFoundError(\"Directory {} does not exist\".format(root_dir))\n\n # List all directories associated to different videos.\n recording_path_list = [os.path.join(root_dir, f) for f in os.listdir(root_dir)]\n\n input_data_path = []\n for g in recording_path_list:\n # Append the different directories associated to different video frame intervals.\n input_data_path.extend([os.path.join(g, f) for f in os.listdir(g)])\n\n return input_data_path", "def collect2dict(filenames, outdir):\n \n tbldict = {}\n for fn in filenames:\n try:\n path = max(glob.glob(outdir+fn+'*.pkl'), key=os.path.getctime)\n out = pd.read_pickle(path)\n tbldict[fn] = out\n except ValueError:\n print(fn + ' not found in ' + outdir)\n return tbldict", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def loadFileNameByModel(self, inputDir):\n fileNames = walktree(inputDir)\n fileByModel = {}\n for file in fileNames:\n modelName = file.split('/')[-1]\n modelName = modelName.replace('.txt', '')\n fileByModel[modelName] = file\n return fileByModel", "def collect_input_files(input_directory_path: Path) -> Generator[Path, None, None]:\n return input_directory_path.glob('**/*')", "def collect(dname='.'):\n files = {}\n\n for paths in os.walk(dname):\n for fname in paths[2]:\n flen = len(fname)\n fpath = os.path.join(paths[0], fname)\n try:\n files[flen].append(fpath)\n except KeyError:\n files[flen] = [fpath]\n\n return files", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def hashFiles(directory):\r\n\tfiledict = {}\r\n\tfor path in getFilePaths(directory):\r\n\t\twith open(path, 'rb') as file:\r\n\t\t\tfiledict[hashlib.md5(file.read()).hexdigest()] = path\r\n\treturn filedict", "def index_files(self, input_dir, output_dir):\n self.lucene = Lucene(output_dir)\n self.lucene.open_writer()\n for path, dirs, _ in os.walk(input_dir):\n for dir in sorted(dirs):\n for _, _, files in os.walk(os.path.join(input_dir, dir)):\n for fn in sorted(files):\n print \"Indexing \", os.path.join(input_dir + dir, fn), \"...\"\n self.index_file(os.path.join(input_dir + dir, fn))\n # closes Lucene index\n self.lucene.close_writer()", "def _set_dirs(self, 
datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)", "def _build_lut(self):\n lut = {}\n for k, v in self.paths.items(): # k:v, <zipfile#>:<path of the extracted archive>\n for _, _, filenames in tf.io.gfile.walk(v):\n for fileName in filenames:\n lut[fileName] = os.path.join(v, 'images', fileName)\n return lut", "def pele_folders(input_, file_list, dir_=None):\r\n os.chdir(\"../\")\r\n if not dir_:\r\n base = basename(input_)\r\n base = base.replace(\".pdb\", \"\")\r\n else:\r\n base = dir_\r\n count = 0\r\n folder = []\r\n for files in file_list:\r\n name = basename(files)\r\n name = name.replace(\".pdb\", \"\")\r\n if not count:\r\n hold = \"bla\"\r\n count += 1\r\n if name != \"original\" and hold != name[:-1]:\r\n hold = name[:-1]\r\n folder.append(\"mutations_{}/{}\\n\".format(base, hold))\r\n with open(\"dirnames_{}.txt\".format(base), \"w\") as txt:\r\n txt.writelines(folder)", "def directory_to_json(self, path, list_in):\n directory_json = {\"base_path\": path, \"files\": list_in}\n return directory_json", "def builddictionary(dirlist):\n init_dictionary={}\n for string in dirlist:\n splitstring=string.split(\"\\t\")\n if len(splitstring) == 2:\n init_dictionary[splitstring[1].strip(\"\\n\")] = [int(splitstring[0]), 0]\n return init_dictionary", "def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)", "def gen_recursive_filelist(d):\n \n for root, directories, files in os.walk(d):\n for file in files:\n yield os.path.join(root, file)", "def create_path_dict(save_path):\n act_fn = [sorted(['relu', 'antirelu', 'identity', 'tanh', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'tanh']),\n sorted(['relu', 'antirelu', 'sigmoid', 'tanh']),\n sorted(['relu', 'identity', 'sigmoid', 'tanh']),\n sorted(['antirelu', 'identity', 'sigmoid', 'tanh']),\n ['relu'],\n ['sigmoid'],\n ['tanh'],\n ['antirelu'],\n ['None']]\n # ['identity']]\n\n act_fn = ['_'.join(act) for act in act_fn]\n path_dict = defaultdict(list)\n for (filepath, dirname, filename) in os.walk(save_path):\n if 'results.json' in filename:\n for act in act_fn:\n temp = filepath.split('/')\n if act == temp[-1] or act == temp[-2]:\n path_dict[act].append(filepath)\n print(path_dict)\n return path_dict", "def make_all_files_dictionary(self, all_files, 
append_to_this=False):\n if append_to_this:\n rdict = append_to_this\n else:\n rdict = {}\n\n all_files.sort()\n for i in all_files:\n count = len(rdict) + 1\n i = os.path.abspath(os.path.expanduser(i))\n\n if platform.system() == \"Windows\":\n full_filename = i.split('\\\\')\n else:\n full_filename = i.split('/')\n\n full_filename = full_filename[-1]\n\n extension = full_filename.split('.')\n extension = extension[-1]\n extension = extension.upper()\n\n filename = full_filename.split('.')\n filename.pop(-1)\n filename = '.'.join(filename)\n\n rdict[i] = dict(\n path=i,\n processed=False,\n drawn=False,\n count=count,\n filename=filename,\n extension=extension,\n status='UNPROCESSED',\n )\n\n return rdict", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def main(input_dir, output_dir):\n\n process(input_dir, output_dir)", "def find_all_infilepaths(in_dir):\n workdir = os.getcwd()\n os.chdir(in_dir)\n\n infiles_paths = dict()\n for infilename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n pos = infilename.split('_')\n pos[-1] = pos[-1].split('.')[0]\n pos = tuple(list(map(lambda s: int(s), pos)))\n num_pos = _3d_to_numeric\n infiles_paths[num_pos] = os.path.join(in_dir, infilename)\n\n os.chdir(workdir)\n return infiles_paths", "def get_objects(dirname):\n\n objects = os.listdir(dirname)\n temp_map = []\n\n for obj in objects:\n\n fpath = os.path.join(dirname, obj)\n\n if fpath[0:2] == \"./\":\n fpath = fpath[2:]\n\n # if the object is a file, store it as a file\n if os.path.isfile(fpath):\n\n temp_map.append({\"name\": fpath,\n \"is_file\": True,\n \"children\": []})\n\n # else, assume the object is a directory\n else:\n\n children_map = get_objects(fpath)\n temp_map.append({\"name\": fpath,\n \"is_file\": False,\n \"children\": children_map})\n\n return temp_map", "def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index", "def mkDir(infile, subdir, verbose=False):\n\n # Write to ROOT File\n RFile = ROOT.TFile(infile, \"UPDATE\")\n RFile.cd()\n\n sdir = []\n\n if ( not RFile.GetDirectory(subdir) ):\n sdir = RFile.GetDirectory(subdir)\n else:\n sdir = getDir(infile, subdir, verbose=False)\n\n return sdir", "def process_folder(root, path=\"\"):\n myDict = {}\n if path:\n if root.cd(path):\n for key in ROOT.gDirectory.GetListOfKeys():\n filterKey(root, key, path, myDict, \"__List\")\n else:\n for key in ROOT.gDirectory.GetListOfKeys():\n mypath = ROOT.gDirectory.GetPathStatic()\n filterKey(root, key, mypath, myDict, \"\")\n ROOT.gDirectory.cd(mypath)\n return myDict", "def _create_index_files(root_dir, force_no_processing=False):\n # Initialise list of created file paths to build up as we make them\n created_files = []\n # Walk the root dir downwards, creating index files as we go\n for here, dirs, files in os.walk(root_dir):\n print('Processing %s' % here)\n\n # Sort the subdirectories by name\n dirs = 
sorted(dirs)\n\n # Get image files - sort all files in the directory matching IMAGE_FILE_REGEX\n image_files = sorted([f for f in files if re.match(IMAGE_FILE_REGEX, f, re.IGNORECASE)])\n\n # Get image files - sort all files in the directory matching VIDEO_FILE_REGEX\n video_files = sorted([f for f in files if re.match(VIDEO_FILE_REGEX, f, re.IGNORECASE)])\n\n # Create this directory's index file and add its name to the created\n # files list\n created_files.append(\n _create_index_file(\n root_dir, here, image_files, video_files, dirs, force_no_processing\n )\n )\n # Return the list of created files\n return created_files", "def SetInputDirs(dirname):\n global indir\n\n indir = dirname\n tout.Debug(\"Using input directories %s\" % indir)", "def path_generator(initial_root):\n for root, dirs, files in os.walk(initial_root):\n paths = [os.path.join(root, name) for name in files]\n return paths", "def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 0\n for child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 1\n elif not child[\"is_file\"]:\n num_dirs += 1\n\n # note down name and numbers for each dir\n f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n str(num_dirs) + \" directories\\n\")\n\n # goto subdir\n if len(i[\"children\"]) > 0:\n walkthrough(i[\"children\"])", "def path_to_dict(self, someDir, level=9001, relativeFolders=True, relativeFiles=False):\n someDir = someDir.rstrip(os.path.sep)\n assert os.path.isdir(someDir)\n numSep = someDir.count(os.path.sep)\n\n outputDict = {}\n for root, dirs, files in os.walk(someDir):\n for d in dirs + files:\n path = os.path.join(root, d)[(len(someDir)):]\n path = path.rstrip(os.sep).lstrip(os.sep)\n pathSplit = paths.os_path_split_asunder(path)\n if os.path.isfile(os.path.join(root, d)) and not relativeFiles:\n pathSplit[-1] = os.path.join(root, d)\n if len(pathSplit) == 1:\n outputDict[pathSplit[0]] = {}\n else:\n nestedDict = self.list_flattened_to_dict(pathSplit)\n mergedDict = dict(mergedicts(outputDict, nestedDict))\n for key in nestedDict.keys():\n outputDict = dict(outputDict, **nestedDict)\n outputDict = dict(outputDict, **mergedDict)\n\n numSepCurrent = root.count(os.path.sep)\n if numSep + level <= numSepCurrent:\n del dirs[:]\n return outputDict", "def create_dictionary(file_dir):\r\n\tword_list = []\r\n\tfile_list = read_files(file_dir, \"lab\") # step 7\r\n\tfor file in file_list:\r\n\t\twith open(file, 'r') as f:\r\n\t\t\ttext = f.read()\r\n\t\tword_list = store_to_dictionary(text, word_list) # step 8cii\r\n\tmake_dictionary_file(file_dir, word_list) # step 9\r", "def fingerprint(dirnames, prefix=None, previous=[]):\n #pylint:disable=dangerous-default-value\n results = []\n for dirname in dirnames:\n for filename in os.listdir(dirname):\n fullpath = 
os.path.join(dirname, filename)\n if os.path.isdir(fullpath):\n results += fingerprint(\n [fullpath], prefix=filename, previous=previous)\n else:\n fullname = fullpath\n if prefix and fullname.startswith(prefix):\n fullname = fullname[len(prefix):]\n found = False\n for prevpath in previous:\n if fullname == prevpath['Key']:\n found = True\n break\n if not found:\n mtime = datetime.datetime.fromtimestamp(\n os.path.getmtime(fullpath), tz=utc)\n results += [{\"Key\": fullname,\n \"LastModified\": mtime.strftime(\n '%a, %d %b %Y %H:%M:%S %Z')}]\n return results", "def process_file_list(input_root, output_root, file_list):\n for backup_id, backup_file in file_list.items():\n # logging.debug(f\"{backup_id}: {backup_file.relative_path}\")\n if backup_file.is_dir:\n create_directory(backup_file, output_root)\n else:\n create_file(backup_file, input_root, output_root)", "def init_dirs(paths):\n in_dir = os.path.join(paths[\"ssd_path\"], \"input_files\")\n out_dir = os.path.join(paths[\"ssd_path\"], \"output_files\")\n\n workdir = os.getcwd()\n for dirpath in [in_dir, out_dir]:\n if not os.path.isdir(dirpath):\n os.mkdir(dirpath) # create dir\n else: # clean dir\n os.chdir(dirpath)\n for filename in glob.glob(\"*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)\n return in_dir, out_dir", "def get_datapaths(input_dir):\n image_paths = []\n assert os.path.isdir(input_dir), f\"{input_dir} is not existed\"\n\n for root, _, names in os.walk(input_dir):\n for name in names:\n path = os.path.join(root, name)\n image_paths.append(path)\n return image_paths", "def get_input_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n return {\n \"bam\": \"work/{mapper_lib}/out/{mapper_lib}.bam\",\n \"bai\": \"work/{mapper_lib}/out/{mapper_lib}.bam.bai\",\n }", "def get_input_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n return {\n \"bam\": \"work/{mapper_lib}/out/{mapper_lib}.bam\",\n \"bai\": \"work/{mapper_lib}/out/{mapper_lib}.bam.bai\",\n }", "def _generate_file_paths(self):\n for table_name in self.tables:\n logger.info(f\"Generating input and output paths for table '{table_name}'...\")\n self.input_paths[table_name] = os.path.join(self.pipeline['input_dir'], f'{table_name}.xml')\n logger.info(f\"Input path for table '{table_name}': {self.input_paths[table_name]}\")\n self.output_paths[table_name] = os.path.join(self.pipeline['output_dir'], f'{table_name}.jsonl')\n logger.info(f\"Output path for table '{table_name}': {self.output_paths[table_name]}\")\n logger.info(f\"Generated {len(self.input_paths)} input paths and {len(self.output_paths)} output paths.\")", "def scandir(path='.'):\r\n for name in os.listdir(path):\r\n yield GenericDirEntry(path, name)", "def filelist(folder):\n file_dict={}\n folderlist = glob.glob(os.getcwd()+\"/\"+folder+\"/*\")\n for i in tqdm(folderlist):\n filelist = glob.glob(i+\"/*\")\n filename = i.rsplit(\"/\")[-1]\n file_dict[filename]= filelist\n\n return file_dict", "def compress_skim_dir(directory, output=\"zarr\"):\n\n if output not in (\"zarr\", \"zarr.zip\"):\n raise NotImplementedError(output)\n\n if output == \"zarr\":\n if not os.path.exists(directory+\".zarr\"):\n os.makedirs(directory+\".zarr\")\n elif output == \"zarr.zip\":\n if os.path.exists(directory+\".zarr.zip\"):\n raise FileExistsError(directory+\".zarr.zip\")\n\n master = {}\n for f in os.walk(directory):\n for fi in f[2]:\n if \".emx\" in fi:\n arr = np.fromfile(fi, dtype='f4')\n side = int(np.sqrt(arr.size))\n arr = arr.reshape(side, side)\n tazrange = 
pd.RangeIndex(1, side+1)\n master[fi.replace(\".emx\", \"\")] = xr.DataArray(\n arr,\n dims=['otaz', 'dtaz'],\n coords={'otaz': tazrange, 'dtaz': tazrange}\n )\n\n master = sh.Dataset(master)\n\n if output == \"zarr\":\n master.to_zarr(directory+\".zarr\", mode='a')\n elif output == \"zarr.zip\":\n with zarr.ZipStore(directory+\".zarr.zip\", mode='w') as store:\n master.to_zarr(store)\n return master", "def build_merged_dir(smap, outdir):\n for (s,sl) in smap.iteritems():\n write_list(merge_one_sensor(sl), os.path.join(outdir,s+\".csv\"))", "def open_output_files(self):\n if not os.path.exists(self.outputDictionaryPath):\n os.makedirs(self.outputDictionaryPath)\n\n self.XMLfile = open(os.path.join(self.outputDictionaryPath, 'MyDictionary.xml'), 'w+', encoding='utf-8') # this is the output file\n self.Makefile = open(os.path.join(self.outputDictionaryPath, 'Makefile'), 'w+', encoding='utf-8')\n self.MyInfoFile = open(os.path.join(self.outputDictionaryPath, 'MyInfo.plist'), 'w+', encoding='utf-8')", "def list_files_in_directory(self):\n lesson_file_dict = dict()\n lesson_file_dict[\"files\"] = []\n\n directory_list = listdir(self.sub_dir)\n for directory in directory_list:\n if isfile(join(self.sub_dir, directory)):\n lesson_file_dict[\"files\"].append(directory)\n\n return lesson_file_dict", "def _anonymize_files(dicom_directory_in, dicom_directory_out, fields_to_keep):\n\n # Make sure we have absolute paths\n dicom_directory_in = os.path.abspath(dicom_directory_in)\n dicom_directory_out = os.path.abspath(dicom_directory_out)\n\n # looping over all files\n for root, _, file_names in os.walk(dicom_directory_in):\n # New directory\n\n for file_name in file_names:\n # Create instance_UID\n fields_to_keep['SOPInstanceUID'] = pydicom.uid.generate_uid()\n\n dicom_file_in = os.path.join(root, file_name)\n current_dir = root[len(dicom_directory_in) + 1:]\n dicom_file_out = os.path.join(dicom_directory_out, current_dir, file_name)\n if common.is_dicom_file(dicom_file_in):\n logging.info(\"Processing \" + dicom_file_in)\n _anonymize_file(dicom_file_in, dicom_file_out, fields_to_keep)\n else:\n logging.info(\"Skipping \" + dicom_file_in + \", no dicom file\")", "def load_files(directory):\n import os\n import re\n\n files = dict()\n\n for file in os.scandir(directory):\n if re.search(\".txt$\", file.name):\n with open(file.path, \"r\", encoding=\"utf8\") as f:\n # re.sub(\".txt$\", \"\", file.name)\n files[file.name] = f.read()\n\n return files", "def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname", "def walk(self):\n for _root, _dirs, files in os.walk(self.root):\n for filename in files:\n if self.is_key(filename):\n yield filename", "def map_path(path, filters=[]):\n\n def _internal(arg, path, names):\n path_list = path.split('/')\n target = rget(arg, path_list[:-1])\n target[path_list[-1]] = {name: \"{}/{}\".format(path, name) for name in names}\n return target\n\n result = {}\n os.path.walk(path, _internal, result)\n return result", "def 
create_station_dics(data_directories):\n \n files_all = {} \n for k,v in data_directories.items() :\n files = os.listdir(v)\n \n for f in files:\n station = f.split('_')[0] \n if station not in files_all.keys():\n files_all[station] = {}\n \n if k == 'ncar': # separating ncar temperature and wind files \n if 'trhc' in f:\n k = 'ncar_t'\n elif 'windc' in f:\n k = 'ncar_w'\n files_all[station][k] = ''\n files_all[station][k] = v + '/' + f # compelte path to the netCDF file \n\n #print('check') \n \n \n return files_all", "def extract_folder_file_structure() -> Dict[str, List[str]]:\n folders_and_files = {}\n for path_to_folder in glob.glob(f\"{ZULIPTERMINAL}/**/\", recursive=True):\n complete_directory_path = Path(path_to_folder)\n if complete_directory_path.name in FOLDERS_TO_EXCLUDE:\n continue\n relative_directory_path = complete_directory_path.relative_to(ROOT_DIRECTORY)\n if str(relative_directory_path) not in DESC_FOR_NO_FILE_FOLDERS:\n files_in_directory = [\n file.name\n for file in complete_directory_path.glob(\"*.py\")\n if file.name != \"__init__.py\"\n ]\n folders_and_files[str(relative_directory_path)] = files_in_directory\n return folders_and_files", "def create_input_files(in_dir, R, I):\n def get_filepath(in_volume, infiles_partition):\n _3d_pos = numeric_to_3d_pos(in_volume.index, infiles_partition, order='F')\n i, j, k = _3d_pos\n out_filename = f'{i}_{j}_{k}.hdf5'\n return os.path.join(in_dir, out_filename)\n\n infiles_partition = get_blocks_shape(R, I)\n infiles_volumes = get_named_volumes(infiles_partition, I)\n for in_volume in infiles_volumes:\n filepath = get_filepath(in_volume, infiles_partition)\n arr = create_random_dask_array(I, distrib='normal', dtype=np.float16)\n save_to_hdf5(arr, filepath, physik_cs=None, key='/data', compression=None)", "def convert_all(input: str, out: str):\n dateien = listdir(input)\n for datei in dateien:\n out_datei = datei.replace(\" \", \"_\") # Leertasten durch Unterstriche ersetzen\n convert_image(input + datei, out + out_datei)", "def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)", "def initialize(self) -> typing.NoReturn:\n\t\tfor root, dirs, files in os.walk(INPUT_DIRECTORY, topdown=False):\n\t\t\tfor fileName in files:\n\t\t\t\tif fileName.endswith('.py'):\n\t\t\t\t\tself.moduleNameSet.add(os.path.join(root, fileName))", "def write_output_data_to_disk(\n output_data_dict,\n output_directory=\"./\",\n output_file_suffix=\".nii.gz\",\n overwrite_existing_files=False,\n):\n if output_data_dict is None:\n return\n\n filename_fields = [i for i in output_data_dict.keys() if i != \"parent_sorting_data\"]\n parent_sorting_data = output_data_dict[\"parent_sorting_data\"]\n\n files_written = {}\n\n \"\"\"\n Write the the converted images to disk\n\n ! 
CONSIDER\n We could simply write as we go?\n Pro: save memory, important if processing very large files\n Con: Reading as we go allows proper indexing\n\n \"\"\"\n\n for field in filename_fields:\n logger.info(\" Writing files for field: %s\", field)\n p = pathlib.Path(output_directory) / parent_sorting_data / field\n p.mkdir(parents=True, exist_ok=True)\n files_written[field] = []\n\n for field_filename_base, field_list in output_data_dict[field].items():\n # Check if there is a list of images with matching names\n # This will depend on the name format chosen\n # If there is a list, we append an index as we write to disk\n\n if isinstance(field_list, (tuple, list)):\n # Flatten\n field_list_flat = list(flatten(field_list))\n\n # Iterate\n for suffix, file_to_write in enumerate(field_list_flat):\n field_filename = field_filename_base + f\"_{suffix}\"\n\n # Some cleaning\n while \"__\" in field_filename:\n field_filename = field_filename.replace(\"__\", \"_\")\n\n while field_filename[-1] == \"_\":\n field_filename = field_filename[:-1]\n\n # Save image!\n output_name = (\n pathlib.Path(output_directory)\n / parent_sorting_data\n / field\n / (field_filename + output_file_suffix)\n )\n files_written[field].append(output_name)\n\n if output_name.is_file():\n logger.warning(\" File exists: %s\", output_name)\n\n if overwrite_existing_files:\n logger.warning(\" You have selected to overwrite existing files.\")\n\n else:\n logger.info(\n \" You have selected to NOT overwrite existing files. Continuing.\"\n )\n continue\n\n sitk.WriteImage(file_to_write, output_name.as_posix())\n\n else:\n field_filename = field_filename_base\n file_to_write = field_list\n\n # Some cleaning\n while \"__\" in field_filename:\n field_filename = field_filename.replace(\"__\", \"_\")\n\n while field_filename[-1] == \"_\":\n field_filename = field_filename[:-1]\n\n # Save image!\n \"\"\"\n ! TO DO\n Use pathlib, and perform some checks so we don\"t overwrite anything!\n \"\"\"\n output_name = (\n pathlib.Path(output_directory)\n / parent_sorting_data\n / field\n / (field_filename + output_file_suffix)\n )\n files_written[field].append(output_name)\n\n if output_name.is_file():\n logger.warning(\" File exists: %s\", output_name)\n\n if overwrite_existing_files:\n logger.warning(\" You have selected to overwrite existing files.\")\n\n else:\n logger.info(\n \" You have selected to NOT overwrite existing files. 
Continuing.\"\n )\n continue\n\n sitk.WriteImage(file_to_write, output_name.as_posix())\n\n return files_written", "def create_folder_structure(ck_dir, out_dir):\n make_dirs(out_dir)\n train_out_dir = out_dir + '/train'\n if not os.path.exists(train_out_dir):\n os.mkdir(train_out_dir)\n\n for sdir in os.listdir(ck_dir):\n spath = os.path.join(ck_dir, sdir)\n for ddir in os.listdir(spath):\n dpath = os.path.join(spath, ddir)\n if os.path.isdir(dpath):\n os.chdir(dpath)\n else:\n print(\"not a dir:\", dpath)\n emotion_txt = glob.glob('*emotion*')\n if len(emotion_txt) == 1:\n add_emotion(os.path.join(dpath, emotion_txt[0]), train_out_dir)\n elif len(emotion_txt) > 1:\n print(emotion_txt)\n test(train_out_dir)", "def create_output_filenames(input_file, output_folder, method):\n\n # Create output folder\n if os.path.exists(output_folder):\n os.system('rm -rf %s' % output_folder)\n os.makedirs(output_folder, exist_ok=True)\n os.chdir(output_folder)\n\n # Create output filenames\n screen_name = '%s_%s' % (input_file.split('/')[-1][:-4], method)\n output = {'ODresults': '%s_OD_results.csv' % screen_name,\n 'ScoreCells': '%s_OD_results_score.csv' % screen_name,\n 'Outliers': '%s_outlier_cells.csv' % screen_name,\n 'PeneAgreement': '%s_penetrance_agreement.png' % screen_name,\n 'Penetrance': '%s_penetrance.png' % screen_name,\n 'PenetranceControls': '%s_penetrance_controls.png' % screen_name,\n 'KS_Correlation': '%s_KS_correlation.png' % screen_name,\n 'WT_Percentile': '%s_WT_percentile.png' % screen_name,\n 'PCA': '%s_PCA.png' % screen_name,\n 'UMAP': '%s_UMAP.png' % screen_name,\n 'log': '%s_log.txt' % screen_name\n }\n\n return output", "def output_files(filepath):\n\n infile = open(filepath, 'r')\n lines = infile.readlines()\n\n rel_path = './'\n rel_path += lines[6][lines[6].find(':')+1:].strip()\n rel_path += lines[7][lines[7].find(':')+1:].strip()\n\n filename_I1 = lines[9][lines[9].find(':')+1:].strip()\n filename_I2 = lines[10][lines[10].find(':')+1:].strip()\n filename_IW = lines[12][lines[12].find(':')+1:].strip()\n filename_WE = lines[13][lines[13].find(':')+1:].strip()\n filename_CFLx = lines[15][lines[15].find(':')+1:].strip()\n filename_CFLv = lines[16][lines[16].find(':')+1:].strip()\n filename_S = lines[18][lines[18].find(':')+1:].strip()\n\n filepath_I1 = rel_path + filename_I1\n filepath_I2 = rel_path + filename_I2\n filepath_IW = rel_path + filename_IW\n filepath_WE = rel_path + filename_WE\n # filepath_CFLx = rel_path + filename_CFLx\n # filepath_CFLv = rel_path + filename_CFLv\n filepath_S = rel_path + filename_S\n\n outfile_I1 = open(filepath_I1, 'w')\n outfile_I2 = open(filepath_I2, 'w')\n outfile_IW = open(filepath_IW, 'w')\n outfile_WE = open(filepath_WE, 'w')\n # outfile_CFLx = open(filepath_CFLx, 'w')\n # outfile_CFLv = open(filepath_CFLv, 'w')\n outfile_S = open(filepath_S, 'w')\n\n outfiles = dict(I1 = outfile_I1,\n I2 = outfile_I2,\n IW = outfile_IW,\n WE = outfile_WE,\n # CFLx = outfile_CFLx,\n # CFLv = outfile_CFLv,\n S = outfile_S)\n\n return outfiles", "def output_files(filepath):\n\n infile = open(filepath, 'r')\n lines = infile.readlines()\n\n rel_path = './'\n rel_path += lines[6][lines[6].find(':')+1:].strip()\n rel_path += lines[7][lines[7].find(':')+1:].strip()\n\n filename_I1 = lines[9][lines[9].find(':')+1:].strip()\n filename_I2 = lines[10][lines[10].find(':')+1:].strip()\n filename_IW = lines[12][lines[12].find(':')+1:].strip()\n filename_WE = lines[13][lines[13].find(':')+1:].strip()\n filename_CFLx = lines[15][lines[15].find(':')+1:].strip()\n 
filename_CFLv = lines[16][lines[16].find(':')+1:].strip()\n filename_S = lines[18][lines[18].find(':')+1:].strip()\n\n filepath_I1 = rel_path + filename_I1\n filepath_I2 = rel_path + filename_I2\n filepath_IW = rel_path + filename_IW\n filepath_WE = rel_path + filename_WE\n # filepath_CFLx = rel_path + filename_CFLx\n # filepath_CFLv = rel_path + filename_CFLv\n filepath_S = rel_path + filename_S\n\n outfile_I1 = open(filepath_I1, 'w')\n outfile_I2 = open(filepath_I2, 'w')\n outfile_IW = open(filepath_IW, 'w')\n outfile_WE = open(filepath_WE, 'w')\n # outfile_CFLx = open(filepath_CFLx, 'w')\n # outfile_CFLv = open(filepath_CFLv, 'w')\n outfile_S = open(filepath_S, 'w')\n\n outfiles = dict(I1 = outfile_I1,\n I2 = outfile_I2,\n IW = outfile_IW,\n WE = outfile_WE,\n # CFLx = outfile_CFLx,\n # CFLv = outfile_CFLv,\n S = outfile_S)\n\n return outfiles", "def get_directory_structure(rootdir):\n rootdir = rootdir.rstrip(os.sep)\n start = rootdir.rfind(os.sep) + 1\n dir= {\"containers\": [rootdir]} \n for path, dirs, files in os.walk(rootdir):\n folders = path[start:].split(os.sep)\n\n subdir = dict.fromkeys(files)\n parent = functools.reduce(dict.get, folders[:-1], dir)\n \n config = get_container_config(path, folders, subdir)\n \n parent[folders[-1]] = {'containers': dirs}\n parent[folders[-1]].update(config)\n \n return dir", "def get_files(self):\n\n for path, dirs, files in os.walk(self.data_path):\n for dir in dirs:\n self.original_files[dir] = []\n self.imitation_files[dir] = []\n for file in os.listdir(path + \"/\" + dir):\n if( \"original\" in file ):\n self.original_files[dir].append(path + \"/\" + dir + \"/\" + file)\n else:\n self.imitation_files[dir].append(path + \"/\" + dir + \"/\" + file)\n\n return", "def copy_folder_to_folder_indexed(source_dir: str, target_dir: str) -> Dict[str, str]:\n index_file_name_dic = dict()\n count = len([name for name in os.listdir(target_dir) if os.path.isfile(os.path.join(target_dir, name))])\n\n files = os.listdir(source_dir)\n for file in files:\n source_path = os.path.join(source_dir, file)\n if os.path.isfile(source_path):\n target_path = os.path.join(target_dir, file)\n shutil.copy2(source_path, target_dir)\n indexed_file_name = str(count) + get_extension(file)\n os.rename(target_path, os.path.join(target_dir, indexed_file_name))\n count += 1\n index_file_name_dic[file] = indexed_file_name\n\n return index_file_name_dic", "def run(self):\n for lof in self.data_files:\n if lof[0]:\n base = getattr(self, 'install_' + lof[0])\n else:\n base = getattr(self, 'install_base')\n dir = convert_path(lof[1])\n if not os.path.isabs(dir):\n dir = os.path.join(base, dir)\n elif self.root:\n dir = change_root(self.root, dir)\n self.mkpath(dir)\n\n files = lof[2]\n if len(files) == 0:\n # If there are no files listed, the user must be\n # trying to create an empty directory, so add the\n # directory to the list of output files.\n self.outfiles.append(dir)\n else:\n # Copy files, adding them to the list of output files.\n for f in files:\n f = convert_path(f)\n (out, _) = self.copy_file(f, dir)\n #print \"DEBUG: \", out # dbg\n self.outfiles.append(out)\n \n\n return self.outfiles", "def get_all_hashes(data_dir):\n file_hashes = {}\n for root, dirs, files in os.walk(data_dir):\n for file_name in files:\n file_path = os.path.join(root, file_name)\n file_hashes[file_path] = generate_file_md5(file_path)\n return file_hashes", "def parse_directories_aux(node, parent_dir):\n download_locations = {}\n current_dir = parent_dir + \"/\" + node['name']\n pattern = 
node['pattern']\n download_locations[pattern] = current_dir\n\n if 'children' in node:\n for child_node in node['children']:\n subcall_ret_val = Parser.parse_directories_aux(child_node,\n current_dir)\n for pat in subcall_ret_val:\n if pat in download_locations:\n raise DuplicateRegexError(\"Duplicate regex - '\" +\n pat + \"' matches paths \" +\n download_locations[pat] +\n \" and \" + subcall_ret_val[pat])\n download_locations.update(subcall_ret_val)\n return download_locations", "def split_input_dirs(self, paths):\n\n for path in paths:\n yield path" ]
[ "0.6456172", "0.64561534", "0.63940656", "0.63858587", "0.63171786", "0.62454695", "0.61177707", "0.6060311", "0.60519314", "0.6003057", "0.5944946", "0.5938714", "0.59148794", "0.5861261", "0.5858004", "0.5832426", "0.5809002", "0.5801422", "0.5792834", "0.57921237", "0.5737989", "0.57112765", "0.5699804", "0.569584", "0.5685001", "0.5680526", "0.56767815", "0.56698686", "0.56468797", "0.5619446", "0.56081635", "0.56029135", "0.56019884", "0.5597204", "0.55853117", "0.55769295", "0.5572514", "0.5569905", "0.5561494", "0.5559046", "0.5531588", "0.55202395", "0.5513855", "0.5512406", "0.5510879", "0.55102456", "0.5507955", "0.550447", "0.5495666", "0.5495228", "0.5490307", "0.5470043", "0.5467664", "0.5466866", "0.5463549", "0.54605293", "0.54510045", "0.5446285", "0.5440541", "0.5439302", "0.54318905", "0.54248005", "0.5419491", "0.5415389", "0.541155", "0.5406727", "0.5402966", "0.53930485", "0.5381948", "0.5381948", "0.53757215", "0.53717124", "0.5370767", "0.5366648", "0.5363899", "0.5360074", "0.5357067", "0.5338436", "0.53286463", "0.532782", "0.5321131", "0.5320257", "0.5318253", "0.5316675", "0.5311982", "0.5311196", "0.5310037", "0.5309931", "0.53089404", "0.5306768", "0.5304697", "0.5302953", "0.5302953", "0.52959085", "0.5288949", "0.5286494", "0.52855515", "0.52843773", "0.5281997", "0.52802145" ]
0.53595054
76
Build the dictionary of all files to be processed in appropriate groupings.
def build_filelist(input_dir: str, syst: bool = False) -> dict:
    filedict = {
        idir.split('SYST_')[-1].split('/')[0]: {}
        for idir in glob('{}/*'.format(input_dir)) if 'SYST_' in idir
    }

    filedict['nominal'] = build_groupings(f'{input_dir}/NOMINAL')
    if syst:
        for idir in filedict.keys():
            if idir == 'nominal':
                continue
            elif 'Rivet' in idir:
                continue
            filedict[idir] = build_groupings(f'{input_dir}/SYST_{idir}')
    else:
        filedict = {'nominal': filedict['nominal']}

    pprint(filedict, width=150)
    return filedict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_groupings(idir: str) -> dict:\n bkg_group = {key: [ifile for ifile in glob(f'{idir}/*_{key}_*.root')] for key in bkgs}\n pw_group = {key: [ifile for ifile in glob(f'{idir}/{key}*.root')] for key in powhegs}\n wh_pw_group = [ifile for name in wh_powhegs for ifile in glob(f'{idir}/{name}*.root')]\n ungrouped = [ifile for ifile in glob(f'{idir}/*.root') if 'madgraph' in ifile or 'JHU' in ifile]\n\n group = {}\n for key, files in bkg_group.items():\n if len(files) > 0:\n group[key] = files\n\n for key, files in pw_group.items():\n if len(files) > 0:\n group[key] = files\n\n for ifile in ungrouped:\n name = ifile.split('/')[-1].replace('.root', '')\n name = name.split('_SYST')[0].replace('-', '_')\n name = name.replace('_ggH125', '').replace('_VBF125', '').replace('_WH125', '').replace('_ZH125', '')\n group[name] = [ifile]\n\n if len(wh_pw_group) > 0:\n group['wh125_powheg'] = wh_pw_group\n\n return group", "def defineFileGroups(self, mergeableFiles):\n fileGroups = {}\n foundFiles = []\n\n for mergeableFile in mergeableFiles:\n if mergeableFile[\"file_lfn\"] not in foundFiles:\n foundFiles.append(mergeableFile[\"file_lfn\"])\n else:\n continue\n\n if mergeableFile[\"pnn\"] not in fileGroups:\n if self.mergeAcrossRuns:\n fileGroups[mergeableFile[\"pnn\"]] = []\n else:\n fileGroups[mergeableFile[\"pnn\"]] = {}\n\n if self.mergeAcrossRuns:\n fileGroups[mergeableFile[\"pnn\"]].append(mergeableFile)\n else:\n if mergeableFile[\"file_run\"] not in fileGroups[mergeableFile[\"pnn\"]]:\n fileGroups[mergeableFile[\"pnn\"]][mergeableFile[\"file_run\"]] = []\n fileGroups[mergeableFile[\"pnn\"]][mergeableFile[\"file_run\"]].append(mergeableFile)\n\n return fileGroups", "def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' 
not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict", "def make_all_files_dictionary(self, all_files, append_to_this=False):\n if append_to_this:\n rdict = append_to_this\n else:\n rdict = {}\n\n all_files.sort()\n for i in all_files:\n count = len(rdict) + 1\n i = os.path.abspath(os.path.expanduser(i))\n\n if platform.system() == \"Windows\":\n full_filename = i.split('\\\\')\n else:\n full_filename = i.split('/')\n\n full_filename = full_filename[-1]\n\n extension = full_filename.split('.')\n extension = extension[-1]\n extension = extension.upper()\n\n filename = full_filename.split('.')\n filename.pop(-1)\n filename = '.'.join(filename)\n\n rdict[i] = dict(\n path=i,\n processed=False,\n drawn=False,\n count=count,\n filename=filename,\n extension=extension,\n status='UNPROCESSED',\n )\n\n return rdict", "def run(self):\n return_dict = {}\n for x in self.file_list:\n return_dict[x] = self.parseFile(x)\n\n return return_dict", "def create_subsets(self):\n new_dict = defaultdict(dict)\n\n for found_file in self.files:\n match = re.match(self.regex_pattern,\n found_file)\n if match:\n groups = match.groupdict()\n for att in groups:\n value = groups[att]\n try:\n new_dict[att][value].add(found_file)\n except KeyError:\n new_dict[att][value] = set([found_file])\n\n return new_dict", "def get_processed_data(self, group_directory):\n processed_dir = [x for x in group_directory.iterdir()\n if x.is_dir() and 'processed' in x.name][0]\n\n task_dirs = [x for x in processed_dir.iterdir()\n if x.is_dir() and 'task' in x.name]\n\n files = dict()\n for task in task_dirs:\n task_camera_dirs = [x for x in task.iterdir()\n if x.is_dir() and 'pc' in x.name]\n\n task_frame_files = list()\n if task_camera_dirs:\n task_frame_files = dict()\n for camera_dir in task_camera_dirs:\n task_frame_files[camera_dir.name] = [x for x in camera_dir.iterdir()\n if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES]\n\n for camera, frame_files in task_frame_files.items():\n for frame_file in frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if task.name not in files:\n files[task.name] = dict()\n\n if frame not in files[task.name]:\n files[task.name][frame] = dict()\n\n files[task.name][frame][camera] = frame_file\n\n else:\n task_frame_files = [x for x in task.iterdir()\n if not x.is_dir()\n and x.suffix in VALID_OUTPUT_FILE_TYPES]\n\n for frame_file in task_frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if task.name not in files:\n files[task.name] = dict()\n files[task.name][frame] = frame_file\n\n return files", "def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))", "def load_files(self):\n print('Saving numpy mask arrays in {0}'.format(self.ProcDir))\n\n if not os.path.isdir(self.ProcDir): 
os.mkdir(self.ProcDir)\n if not os.path.isdir(self.OutDir): os.mkdir(self.OutDir)\n\n self.Files = {}\n for ig in self.Set:\n phase = roipy.tools.load_half(ig,2)\n # convert wavelength to displacements\n # NOTE: make attributes of commonly used values in rsc: float(ig.Rsc['WAVELENGTH'])\n disp = phase * (ig.Wavelength / (4*np.pi))\n igram = ma.array(disp, mask=ma.nomask)\n name = self.save_ma(ig, igram) #Mask_ array is just zeros at this point..\n self.Files[ig.ID] = name\n\n print('load_files() complete: {0} interferograms'.format(self.Set.Nig))", "def _load_files(self):\n files = {}\n for fn_ in self.opts[\"src\"]:\n if os.path.isfile(fn_):\n files.update(self._file_dict(fn_))\n elif os.path.isdir(fn_):\n salt.utils.stringutils.print_cli(\n \"{} is a directory, only files are supported \"\n 'in non-chunked mode. Use \"--chunked\" command '\n \"line argument.\".format(fn_)\n )\n sys.exit(1)\n return files", "def analyze_files(self):\n num_file = 0\n results = dict()\n try:\n list_files = os.listdir(self.directory)\n except FileNotFoundError:\n raise FileNotFoundError(\"Can't find any file\")\n else:\n for file in list_files: #looping the files in the directly\n num_file += 1\n if file.endswith(\".py\"): # Looking for files that end with .py\n try:\n fp = open(os.path.join(self.directory, file), \"r\")\n except FileNotFoundError:\n raise FileNotFoundError(f\"Can't open file no {num_file}\")\n else:\n with fp:\n c_total = 0 #Total length of Characters for the entire file\n filename = file # Storing the file name\n t_line = 0 # Getting the total number of line\n t_def = 0 #Getting the total number of functions\n t_class = 0 #Getting the total number of classes\n \n for line in fp:\n t_line += 1 # Counting each line\n t_char = len(line) #Length of characters for each line\n n_line = line.strip() # gets rid of white spaces and new lines\n c_total += t_char # adding each total char in line to the pervious total char in line\n if n_line.startswith(\"def \"): \n t_def += 1 \n elif n_line.startswith(\"class \"):\n t_class += 1\n results[filename] = {'class': t_class, 'function': t_def, 'line': t_line, 'char': c_total }\n return results", "def make_file_dict():\r\n fileDict = {'pageUrls': [],\r\n 'pageFileNames': [],\r\n 'pageIds': [],\r\n 'fileUrls': [],\r\n 'fileIds': [],\r\n 'fileNames': [],\r\n 'cssUrls': [],\r\n 'cssFileNames': [],\r\n 'imgUrls': [],\r\n 'imgFileNames': []}\r\n return fileDict", "def _parse_proxy_files(self):\n out = dict()\n\n # name may be something like \"ssp1_[YEAR].tif\", which actually refers to multiple files\n # such as \"ssp1_2010.tif\" and \"ssp1_2020.tif\" when info['years'] == [2010, 2020]\n for name, info in self.proxy_files.items():\n # promote strs to list\n if isinstance(info['variables'], str):\n info['variables'] = [info['variables']]\n\n if isinstance(info['years'], int):\n info['years'] = [info['years']]\n\n # flags are optional\n if 'flags' in info:\n if isinstance(info['flags'], str):\n info['flags'] = [info['flags']]\n else:\n info['flags'] = []\n\n for variable in info['variables']:\n\n # file name may use an abbreviation of the variable name\n # if info['variables'] is a dict of form {variable: abbreviation}\n abbreviation = info['variables'][variable] if isinstance(info['variables'], dict) else variable\n\n for year in info['years']:\n # determine the actual name of the file containing variable variable for year year\n filename = name.replace('{variable}', abbreviation).replace('{year}', str(year))\n\n if filename not in out:\n out[filename] = 
{'variables': [], 'years': [], 'flags': info['flags']}\n\n if variable not in out[filename]['variables']:\n out[filename]['variables'].append(variable)\n if year not in out[filename]['years']:\n out[filename]['years'].append(year)\n\n self.proxy_files = out", "def filenames(self) -> dict[str, str]:\r\n ...", "def get_clean_data(self, group_directory):\n clean_dir = [x for x in group_directory.iterdir()\n if x.is_dir() and 'clean' in x.name][0]\n\n task_dirs = [x for x in clean_dir.iterdir()\n if x.is_dir() and 'task' in x.name]\n\n files = dict()\n for task in task_dirs:\n task_camera_directory = [x for x in task.iterdir()\n if x.is_dir() and 'pc' in x.name]\n\n camera_files = dict()\n for camera_directory in task_camera_directory:\n camera_id = camera_directory.name\n camera_frame_files = [x for x in camera_directory.iterdir()\n if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES]\n for frame_file in camera_frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if camera_directory.name not in camera_files:\n camera_files[camera_id] = dict()\n\n camera_files[camera_id][frame] = frame_file\n files[task.name] = camera_files\n\n return files", "def files(self):\n result = {}\n if 'files' in self.fields:\n indicies = xrange(len(self.fields['files']))\n files = self.fields['files']\n priorities = self.fields['priorities']\n wanted = self.fields['wanted']\n index = 1\n for item in zip(indicies, files, priorities, wanted):\n if item[3]:\n selected = True\n else:\n selected = False\n priority = PRIORITY[item[2]]\n result[item[0]] = {\n 'selected': selected,\n 'priority': priority,\n 'size': item[1]['length'],\n 'name': item[1]['name'],\n 'completed': item[1]['bytesCompleted']}\n return result", "def _build_file_tree(self):\n # Build file tree with packmode and weigth info (# of file in the packmode)\n root = {\"packmode\": None, \"weight\": None, \"children\": {}}\n for filepath, packmode in self.override_packmode_map.items():\n node = root\n for part in filepath:\n node = node[\"children\"].setdefault(\n part, {\"packmode\": None, \"weight\": None, \"children\": {}}\n )\n node[\"weight\"] = 1\n node[\"packmode\"] = packmode\n return root", "def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory", "def files(self):\n def f():\n return {'count': 0, 'size': 0, 'type': None}\n _files = defaultdict(f)\n\n for s in self.subjects:\n for sa in s.samples:\n for blob in sa.blobs.values():\n # get extension\n type = blob['name'].replace('.gz', '')\n type = type.split('/')[-1].split('.')[-1]\n _files[type]['count'] += 1\n _files[type]['type'] = type.title()\n _files[type]['size'] += blob['size']\n return _files", 
"def collect(dname='.'):\n files = {}\n\n for paths in os.walk(dname):\n for fname in paths[2]:\n flen = len(fname)\n fpath = os.path.join(paths[0], fname)\n try:\n files[flen].append(fpath)\n except KeyError:\n files[flen] = [fpath]\n\n return files", "def parse(cls, raw_folder: str) -> Dict[str, Any]:\n folder_path = os.path.abspath(raw_folder)\n data = dict()\n files = os.listdir(folder_path)\n for file in files:\n if is_ignored(file):\n continue\n try:\n file = os.path.join(raw_folder, file)\n datum = cls.process_file(file)\n except FileNotCompatible:\n continue\n\n _, kwrd = os.path.split(file)\n kwrd = os.path.splitext(kwrd)[0]\n data[kwrd] = datum\n\n return data", "def create_dicts(self):\n \n # remove this string from filename to make output file names more manageable\n pre_output1 = self.file1.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n pre_output2 = self.file2.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n \n # Build the output file name.\n # if prefix is present add it\n if self.out_file_prefix is not None:\n # concatenate prefix, filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = self.out_file_prefix+pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n # if no prefix don't add it!\n else:\n # concatenate filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n\n # add temp to end of file name to create a temporary output filename\n self.tempoutputfilename = self.outputfilename.replace(\".txt\", '') + \"temp.txt\"\n\n # open temp output file\n self.tempoutputfile = open(self.outputfolder + self.tempoutputfilename, 'w')\n\n \n # open FE files\n file1_open = open(self.chosenfolder + self.file1, 'r')\n file2_open = open(self.chosenfolder + self.file2, 'r')\n\n # open file1 and create a dict of the features.\n for linenumber, line in enumerate(file1_open):\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file1_dict[int(splitline[1])] = line\n # get n of rows in file1 (take the linenumber of the last line)\n self.file1_len = linenumber\n\n # repeat for features in second file but first writing the feparam and stats to temp file - when pairing with control this ensures the \"header\" comes from the test (file2) not control (file1), NB NEITHER ARE ACCURATE!!!!\n for linenumber, line in enumerate(file2_open):\n if linenumber < 10:\n self.tempoutputfile.write(line)\n # then add all features to a dictionary, with the unique feature number as a key\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file2_dict[int(splitline[1])] = line\n # get n of rows in file2\n self.file2_len = linenumber\n\n # close files\n file1_open.close()\n file2_open.close()", "def _get_check_files(self, group=None, severity=None):\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if (not severity) or severity == sev:\n check_files += Config.get_check_files(group=g,\n names=files,\n severity=sev)\n groups[g] = check_files\n return groups", "def __init__(self):\n groups = [\n os.path.splitext(f)[0] for f in os.listdir(data_dir) if f.endswith(\".json\")\n ]\n\n self._data = {\n group: IndicatorGroup.parse_file(os.path.join(data_dir, f\"{group}.json\"))\n for group in groups\n }", "def 
get_meta_data(self) -> dict:\n\n meta_data_dict = dict()\n meta_data_dict[\"total_fabricated_files\"] = len(self._meta_data_dict.keys())\n\n collective_fabricated_data = defaultdict(lambda: 0, dict())\n for file in self._meta_data_dict.keys():\n\n file_meta_data = self._meta_data_dict[file]\n\n for data in file_meta_data:\n collective_fabricated_data[data] += file_meta_data[data]\n\n meta_data_dict[\"total_fabricated_entries\"] = dict(collective_fabricated_data)\n meta_data_dict[\"fabricated_files\"] = self._meta_data_dict\n\n return meta_data_dict", "def get_files(self):\n\n for path, dirs, files in os.walk(self.data_path):\n for dir in dirs:\n self.original_files[dir] = []\n self.imitation_files[dir] = []\n for file in os.listdir(path + \"/\" + dir):\n if( \"original\" in file ):\n self.original_files[dir].append(path + \"/\" + dir + \"/\" + file)\n else:\n self.imitation_files[dir].append(path + \"/\" + dir + \"/\" + file)\n\n return", "def get_metadata(self):\n previous = DirectoryMetadata.load_pickle(self)\n metadata = {}\n\n for dirpath, dirnames, filenames in os.walk(self.prefix_dir):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n relative_path = path.split(self.base_dir, 1)[1]\n try:\n stats = os.stat(path)\n except OSError:\n log.exception('Error stating a file on disk while building up metadata, skipping file %s' % path)\n continue\n swift_bytes = stats.st_size\n mtime = datetime.utcfromtimestamp(stats.st_mtime)\n if (previous is not None) and (relative_path in previous.metadata) and\\\n (previous.metadata[relative_path].bytes == swift_bytes):\n swift_hash = previous.metadata[relative_path].hash\n else:\n try:\n with open(path, 'rb') as afile:\n md5_hash = hashlib.md5()\n md5_hash.update(afile.read())\n swift_hash = md5_hash.hexdigest()\n except OSError:\n log.exception('Error reading a file to create the md5 while building up metadata, skipping file %s' % path)\n continue\n\n metadata[relative_path] = FileMetadata(relative_path, swift_bytes, mtime, swift_hash)\n\n return metadata", "def files(self):\n files = dict()\n for name, value in self.__dict__.items():\n public_methods = ['list_filenames', 'add']\n if not name.startswith('_') and name not in public_methods:\n files[name] = value\n return files", "def files_debug(dirfiles):\r\n\r\n files_path = {}\r\n \r\n normal_folder = [\"K002\"]\r\n OR_folder = [\"KA01\"]\r\n IR_folder = [\"KI01\"]\r\n MIX_folder = [\"KB23\"] # VERIFICAR\r\n\r\n settings_files = [\"N15_M07_F10_\", \"N09_M07_F10_\", \"N15_M01_F10_\", \"N15_M07_F04_\"]\r\n\r\n n = 20\r\n\r\n # Normal\r\n for folder in normal_folder:\r\n for idx, setting in enumerate(settings_files):\r\n for i in range(1, n+1):\r\n key = \"Normal_\" + folder + \"_\" + str(idx) + \"_\" + str(i)\r\n files_path[key] = os.path.join(dirfiles, folder, setting + folder +\r\n \"_\" + str(i) + \".mat\")\r\n\r\n # OR\r\n for folder in OR_folder:\r\n for idx, setting in enumerate(settings_files):\r\n for i in range(1, n+1):\r\n key = \"OR_\" + folder + \"_\" + str(idx) + \"_\" + str(i)\r\n files_path[key] = os.path.join(dirfiles, folder, setting + folder +\r\n \"_\" + str(i) + \".mat\")\r\n\r\n # IR\r\n for folder in IR_folder:\r\n for idx, setting in enumerate(settings_files):\r\n for i in range(1, n+1):\r\n key = \"IR_\" + folder + \"_\" + str(idx) + \"_\" + str(i)\r\n files_path[key] = os.path.join(dirfiles, folder, setting + folder +\r\n \"_\" + str(i) + \".mat\")\r\n\r\n return files_path", "def categorize (self):\n\n fout = defaultdict(list)\n\n # Flat lists of files to 
collect keyed by platform,category\n collect_files = dict()\n for platform in wanted_files:\n for category, flist in wanted_files[platform].items():\n for f in flist:\n collect_files[(platform,category,f)] = list()\n\n for a in self.artifacts:\n try:\n with zfile.ZFile(a.lpath, 'r') as zf:\n if os.path.splitext(a.lpath)[-1] == '.rpm':\n a.info['plat'] = 'rhel'\n\n platform = a.info['plat']\n if platform not in platforms:\n continue\n\n zfiles = zf.getnames()\n if len(zfiles) == 0:\n print('No files in %s?' % a)\n for category, flist in wanted_files[platform].items():\n for f in flist:\n matches = [(a,x) for x in zfiles if os.path.basename(x) == f]\n if len(matches) > 0:\n collect_files[(platform,category,f)] += matches\n fout[category] += matches\n\n except zfile.tarfile.ReadError as e:\n print('ignoring artifact: %s: %s' % (a.lpath, str(e)))\n\n # Verify that all wanted combinations were matched\n errors = 0\n for missing in [x for x in collect_files if len(collect_files[x]) == 0]:\n errors += 1\n print('ERROR: No matching artifact files for', missing)\n\n if errors > 0:\n raise Exception('Not all wanted files found in artifacts, see above.')\n return fout", "def _build_dictionary(self):\n print(\"Building Dictionary...\")\n self.dictionary = Dictionary(self.load_files())", "def map_files(key):\n \n datadir=os.path.join(os.path.dirname(__file__),'ncnr_sample_data')\n filedict={'empty_1m':os.path.join(datadir,'SILIC001.SA3_SRK_S101'),\n 'empty_4m':os.path.join(datadir,'SILIC002.SA3_SRK_S102'),\n 'empty_cell_1m':os.path.join(datadir,'SILIC003.SA3_SRK_S103'),\n 'blocked_1m':os.path.join(datadir,'SILIC004.SA3_SRK_S104'),\n 'trans_empty_cell_4m':os.path.join(datadir,'SILIC005.SA3_SRK_S105'),\n 'trans_sample_4m':os.path.join(datadir,'SILIC006.SA3_SRK_S106'),\n 'blocked_4m':os.path.join(datadir,'SILIC007.SA3_SRK_S107'),\n 'empty_cell_4m':os.path.join(datadir,'SILIC008.SA3_SRK_S108'),\n 'sample_1m':os.path.join(datadir,'SILIC009.SA3_SRK_S109'),\n 'sample_4m':os.path.join(datadir,'SILIC010.SA3_SRK_S110'),\n 'mask':os.path.join(datadir,'DEFAULT.MASK'),\n 'div':os.path.join(datadir,'PLEX_2NOV2007_NG3.DIV'),\n }\n return filedict[key]", "def get_autographs(pathtofile):\n\n autos = {}\n\n path = Path(pathtofile)\n assert path.is_dir()\n file_list = []\n for x in path.iterdir():\n if x.is_dir():\n file_list.append(x)\n print(f\"Found files {len(file_list)} -- {file_list}\")\n\n for f in file_list:\n name = str(f)[len(pathtofile) + 1 :]\n autos[name] = {}\n for x in f.iterdir():\n if str(x) == f\"{pathtofile}/{name}/{name}.txt\":\n info_file = x\n f = open(info_file, \"r\").readlines()\n info_name = f[0]\n info_quote = f[1]\n elif (\n str(x) == f\"{pathtofile}/{name}/{name}.jpg\"\n or str(x) == f\"{pathtofile}/{name}/{name}.png\"\n ):\n info_img = x\n else:\n l = len(pathtofile) + len(name) + 12\n f = open(x, \"r\").read().replace(\"\\n\", \" \").split()\n s = []\n for i in range(0, len(f), 20):\n s.append(\" \".join(f[i : i + 20]))\n output = \"\\n\".join(s)\n autos[name][str(x)[l:-4]] = output\n\n return autos", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict", "def get_pathes(self) -> Dict[str, str]:\n\n pathes: Dict[str, str] = {}\n\n for path in self.files:\n name = path.split(\"/\")[-1].split(\".\")[0]\n pathes[name] = os.path.join(self.home_folder, path)\n return pathes", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n 
for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n 
self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def get_dict(self):\n\tself.log.debug('Getting dictionary from config files: %s', str(self.file_list))\n\tfor cfg_file in self.file_list:\n\t \"\"\"\n\t We want to append dictionaries from all the config files.\n\t \"\"\"\n\t if self.cfg_type == None: self.cfg_type = self._get_cfg_type(cfg_file)\n\t self.log.debug('Updating dictionary from config file in the order provided: %s',str(cfg_file) )\n\t if self.cfg_type.lower() in ['yaml', \"yml\"]: self._get_dict_yaml(cfg_file)\n\t elif self.cfg_type.lower() == 'xml': self._get_dict_xml(cfg_file)\n\t elif self.cfg_type.lower() == 'json': self._get_dict_json(cfg_file)\n\t elif self.cfg_type.lower() == 'ini': self._get_dict_ini(cfg_file)\n\t \n\treturn self.cfg_dict", "def create_files_dict(csv_file_name: str):\r\n\r\n SKUs = [] # list of SKU's in the csv file\r\n with open(csv_file_name, 'r') as csv_fd:\r\n csv_reader = csv.reader(csv_fd)\r\n for line in csv_reader:\r\n for SKU in line:\r\n SKUs.append(SKU)\r\n\r\n # creating a list of file extensions [.ext, ...]\r\n file_extensions = []\r\n for SKU in SKUs:\r\n for dir_file in os.listdir():\r\n if SKU in os.path.splitext(dir_file)[0]:\r\n dir_file_ext = os.path.splitext(dir_file)[1]\r\n if dir_file_ext not in file_extensions:\r\n file_extensions.append(dir_file_ext)\r\n file_extensions.sort() # sorting by ascii for constant format view\r\n # print(\"debug:::file_extensions\", file_extensions)\r\n\r\n ext_format_dict = {} # base format for creating extension dict (to be copied for each iteration)\r\n for ext in file_extensions:\r\n ext_format_dict[ext] = ''\r\n\r\n files = {}\r\n for filename_base in SKUs:\r\n for dir_file_0 in os.listdir():\r\n current_file_extensions = ext_format_dict.copy() # reset dict values for each file\r\n if filename_base in os.path.splitext(dir_file_0)[0]:\r\n # need to take the dir_file_base and re-iterate over listdir to find all exact name filenames\r\n for dir_file_1 in os.listdir():\r\n if os.path.splitext(dir_file_0)[0] == os.path.splitext(dir_file_1)[0]:\r\n dir_file_base = os.path.splitext(dir_file_1)[0]\r\n dir_file_ext = os.path.splitext(dir_file_1)[1]\r\n if dir_file_ext in list(current_file_extensions.keys()):\r\n current_file_extensions[dir_file_ext] = 'V'\r\n files[dir_file_base] = current_file_extensions\r\n\r\n return files", "def prepare_fastq(self) -> dict:\r\n\r\n fastq = {}\r\n for organism, data in self.composition.items():\r\n file = data['file']\r\n file_path = Path(file)\r\n if not file_path.exists():\r\n raise ValueError(f'File {file_path} does not exist.')\r\n else:\r\n fastq[organism] = pyfastx.Fastq(file)\r\n\r\n self.logger.info('Prepared read files - proceeding')\r\n\r\n return fastq", "def _load(self):\n res = {}\n for file in self.files:\n loaded = self.__load_json(file)\n for k, v in loaded.items():\n if isinstance(v, (list, tuple)):\n res.setdefault(k, []).extend(v)\n else:\n res.setdefault(k, []).append(v)\n\n for url, datas in self.url_for_data.items():\n if not isinstance(datas, (list, tuple)):\n datas = [datas]\n for data in datas:\n res.setdefault(url, [])\n if isinstance(data, Path):\n data = self.__load_json(data)\n if isinstance(data, (list, tuple)):\n res[url].extend(data)\n else:\n res[url].append(data)\n return res", "def _get_file_names():\n file_names = {}\n file_names['train'] = ['data_batch_%d' % i for i in xrange(1, 6)]\n file_names['test'] = 
['test_batch']\n\n return file_names", "def _create_input_file_dict(self, job_data):\n\n files_dict = {}\n\n for input_name, file_ids in job_data.get_input_file_ids_by_input().items():\n file_list = []\n file_names = set()\n for file_id in file_ids:\n scale_file_model = self._input_files[file_id]\n input_file = InputFile(scale_file_model)\n # Check for file name collision and use Scale file ID to ensure names are unique\n file_name = scale_file_model.file_name\n\n if file_name in file_names:\n file_name = '%d.%s' % (scale_file_model.id, file_name)\n input_file.local_file_name = file_name\n file_names.add(file_name)\n file_list.append(input_file)\n files_dict[input_name] = file_list\n\n return files_dict", "def _find_named_files(self):\n for name, description in self.named_files.iteritems():\n name = name.format(job_name=self.job_name)\n f_path = '{}/{}'.format(self.rism3d_folder, name)\n if os.path.isfile(f_path):\n self.file_path_dic[description] = f_path\n else:\n self._not_found_error(f_path)", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n # read the file in, sample-by-sample\n # build the dictionary recursively\n # add rle file also to generated cfg files\n # print integrations per job as well!\n # consider more than 1 file per jobs -- the jobs are splitted by MEM integration anyways\n\n rle_filters = self.get_filter() if self.rle_filter_file else {}\n statistics = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n if not os.path.exists(sample_info['local_paths'][0]['path']):\n logging.warning(\"Skipping sample {sample_name}\".format(sample_name = sample_name))\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_addMEM, process_name))\n is_mc = (sample_info[\"type\"] == 
\"mc\")\n if self.rle_filter_file:\n assert(process_name in rle_filters)\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n # typically, the analysis ends here and starts looping b/c the smallest unit of work processes\n # at least one file; we need, however, to split the file into event ranges in such a way that\n # each job performs mem_integrations_per_job MEM integrations\n\n # so what we are going to do is to open each set of files in inputFileList, read the variable\n # requestMEM_*l_*tau and try to gather the event ranges such that each event range\n # performs up to mem_integrations_per_job integrations per job\n memEvtRangeDict = self.memJobList(inputFileList, rle_filters[process_name] if self.rle_filter_file else [])\n\n for jobId in memEvtRangeDict.keys():\n\n key_dir = getKey(sample_name)\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = memEvtRangeDict[jobId]['input_fileset']\n\n # there should always be a job\n assert(self.inputFiles[key_file] > 0), \"More than one input file: %s ?? !!\" % \\\n ', '.join(self.inputFiles[key_file])\n\n #assert(len(self.inputFiles[key_file]) == 1), \"There is more than one input file!\"\n self.cfgFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i_cfg.py\" % (self.channel, process_name, jobId)\n )\n self.shFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i.sh\" % (self.channel, process_name, jobId)\n )\n self.outputFiles[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_NTUPLES], \"%s_%i.root\" % (process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"addMEM_%s_%s_%i.log\" % (self.channel, process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = get_log_version((self.logFiles_addMEM[key_file],))[0]\n self.createCfg_addMEM(\n self.inputFiles[key_file],\n memEvtRangeDict[jobId]['event_range'][0],\n memEvtRangeDict[jobId]['event_range'][1],\n self.outputFiles[key_file],\n self.era,\n sample_info[\"sample_category\"],\n is_mc,\n self.cfgFiles_addMEM_modified[key_file],\n memEvtRangeDict[jobId]['whitelist'],\n )\n\n # associate the output file with the fileset_id\n #UDPATE: ONE OUTPUT FILE PER SAMPLE!\n fileset_id = memEvtRangeDict[jobId]['fileset_id']\n hadd_output_dir = os.path.join(\n self.dirs[key_dir][DKEY_FINAL_NTUPLES],\n '%04d' % (fileset_id // 1000)\n )\n if not os.path.exists(hadd_output_dir):\n os.makedirs(hadd_output_dir)\n hadd_output = os.path.join(\n hadd_output_dir, '%s_%i.root' % ('tree', fileset_id) # UDPATE: ADDED\n #hadd_output_dir, \"tree.root\" # UDPATE: REMOVED\n )\n if hadd_output not in self.hadd_records:\n self.hadd_records[hadd_output] = {}\n self.hadd_records[hadd_output]['output_files'] = []\n self.hadd_records[hadd_output]['fileset_id'] = fileset_id\n self.hadd_records[hadd_output]['output_files'].append(self.outputFiles[key_file])\n self.hadd_records[hadd_output]['process_name'] = process_name\n\n # let's sum the number of integration per sample\n nofEntriesMap = {}\n for v in memEvtRangeDict.values():\n if v['fileset_id'] not in nofEntriesMap:\n nofEntriesMap[v['fileset_id']] = {\n 'nof_entries' : v['nof_entries'],\n }\n statistics[process_name] = {\n 'nof_int' : sum([entry['nof_int'] for entry in memEvtRangeDict.values()]),\n 'nof_entries' : sum([entry['nof_entries'] for entry in nofEntriesMap.values()]),\n 'nof_events_pass' : sum([entry['nof_events_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_int_pass' : 
sum([entry['nof_int_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_zero' : sum([entry['nof_zero'] for entry in memEvtRangeDict.values()]),\n 'nof_jobs' : len(memEvtRangeDict),\n }\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_addMEM)\n self.createScript_sbatch()\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_addMEM(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n self.createMakefile(lines_makefile)\n\n ws_len = max([len(kk) + 1 for kk in statistics.keys()])\n total_nof_integrations_sum = sum(x['nof_int'] for x in statistics.values())\n total_nof_entires = sum(x['nof_entries'] for x in statistics.values())\n total_nof_zero_int = sum(x['nof_zero'] for x in statistics.values())\n total_nof_jobs = sum(x['nof_jobs'] for x in statistics.values())\n total_nof_pass = sum(x['nof_events_pass'] for x in statistics.values())\n total_nof_int_pass_avg = float(sum(x['nof_int_pass'] for x in statistics.values())) / total_nof_pass\n total_nof_integrations_avg = float(total_nof_integrations_sum) / total_nof_entires\n total_nof_int_per_job = float(total_nof_integrations_sum) / total_nof_jobs\n for k, v in statistics.iteritems():\n if v['nof_entries'] == 0:\n int_per_event = 0.\n evt_pass = 0.\n else:\n int_per_event = float(v['nof_int']) / v['nof_entries']\n evt_pass = (100 * float(v['nof_events_pass']) / v['nof_entries'])\n if v['nof_events_pass'] == 0:\n nof_int_pass = 0.\n else:\n nof_int_pass = float(v['nof_int_pass']) / v['nof_events_pass']\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d (%.2f%%) evt pass; %.2f int/evt pass; %d evt 0int)' %\n (k,\n ' ' * (ws_len - len(k)),\n v['nof_int'],\n v['nof_entries'],\n v['nof_jobs'],\n int_per_event,\n v['nof_events_pass'],\n evt_pass,\n nof_int_pass,\n v['nof_zero'],\n )\n )\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d evt pass; %.2f int/evt pass; '\n '%.2f int/job pass; %d evt 0int)' %\n ('total',\n ' ' * (ws_len - len('total')),\n total_nof_integrations_sum,\n total_nof_entires,\n total_nof_jobs,\n total_nof_integrations_avg,\n total_nof_pass,\n total_nof_int_pass_avg,\n total_nof_int_per_job,\n total_nof_zero_int,\n )\n )\n\n if self.max_mem_integrations > 0 and total_nof_integrations_sum > self.max_mem_integrations:\n logging.error(\"Will not start the jobs (max nof integrations exceeded)!\")\n return False\n else:\n logging.info(\"Done\")\n return True", "def get_files(input_dir):\n file_rep = { \"tars\" : [] }\n \n files = os.listdir(input_dir)\n \n the_file, the_date = find_bootstrap(files)\n \n #add index file in file_rep\n file_rep['index'] = the_file\n file_rep['date'] = the_date\n \n pattern = \"ncep_forecast_%s_(?P<name>\\S+).tar\" % (the_date)\n \n the_re = re.compile(pattern)\n\n for the_file in files:\n matched = the_re.match(the_file)\n if matched:\n print(\"matched %s\" % (matched.group(\"name\")))\n file_rep['tars'].append(the_file)\n \n return file_rep", "def default_file_hierarchy_dict():\n return {\n directory(\"include\"): {\n directory(\"with spaces\"): {\n file(\"with spaces.hpp\"): {\n namespace(\"with_spaces\"): {\n function(\"int\", \"value\"): parameters()\n }\n }\n }\n }\n }", "def gather_file_data(config):\n file_regex = re.compile(config['file_regex'])\n category_regex = re.compile(config['category_regex'])\n policies = {}\n\n for root, dirs, files in os.walk(config['c7n_policy_directory']):\n for file in files:\n if file_regex.match(file):\n file_path = root + '/' + file\n 
logging.debug('Processing file %s', file_path)\n with open(file_path, 'r') as stream:\n try:\n if category_regex.search(file_path):\n category = 'Security & Governance'\n else:\n category = 'Cost Controls'\n\n policies = yaml.load(stream)\n for policy in policies['policies']:\n logging.debug(\n 'Processing policy %s', policy['name'])\n policy['file_url'] = get_file_url(\n file_path, config)\n resource_type = policy['resource']\n if category not in c7n_data:\n c7n_data[category] = {}\n if resource_type not in c7n_data[category]:\n c7n_data[category][resource_type] = []\n c7n_data[category][resource_type].append(policy)\n except yaml.YAMLError as exc:\n logging.error(exc)", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if (name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def build_filedic(data_path, lanczos_path):\n filedic = {'CERA': sorted(glob.glob(data_path + 'CERA20C/*.nc')),\n 'lanczos(CERA)': sorted(glob.glob(lanczos_path + 'CERA_7*.nc')),\n 'lanczos(20CR)': sorted(glob.glob(lanczos_path + '20CRv3_5*.nc'))}\n return filedic", "def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)", "def collect2dict(filenames, outdir):\n \n tbldict = {}\n for fn in filenames:\n try:\n path = max(glob.glob(outdir+fn+'*.pkl'), key=os.path.getctime)\n out = pd.read_pickle(path)\n tbldict[fn] = out\n except ValueError:\n print(fn + ' not found in ' + outdir)\n return tbldict", "def filter_overlapping_files(files):\n keys = list(files.keys())\n base = min([key.replace(\"M\", \"\") for key in files.keys()])\n base = str(base) + \"M\"\n keys.remove(base)\n base_files = files[base]\n\n dict_files_all = {}\n for key in keys:\n file_keys = files[key]\n for file_key in file_keys:\n for file_base in base_files:\n dates_overlapping = filter_overlapping_dates(file_base, file_key)\n if len(dates_overlapping) > 0:\n list_files = [file_base, file_key]\n combination = base + \"_\" + key\n if combination in dict_files_all.keys():\n dict_files_all[combination].append(list_files)\n 
else:\n dict_files_all[combination] = [list_files]\n return dict_files_all", "def get_files(current_dir, filename_pattern=\".*\"):\n files_dict = {}\n for root, dirs, files in os.walk(current_dir):\n files_dict.update(\n {filename: os.path.join(root, filename) for filename in files if re.match(filename_pattern, filename)}\n )\n return files_dict", "def list_files_in_directory(self):\n lesson_file_dict = dict()\n lesson_file_dict[\"files\"] = []\n\n directory_list = listdir(self.sub_dir)\n for directory in directory_list:\n if isfile(join(self.sub_dir, directory)):\n lesson_file_dict[\"files\"].append(directory)\n\n return lesson_file_dict", "def collect_seq_to_file(image_dirs, prefix, suffix):\n seq_to_file = {}\n for image_dir in image_dirs:\n seq_to_file.update(get_seq_to_file(image_dir, prefix, suffix))\n return seq_to_file", "def get_files(filename):\n files = {'base': filename}\n\n base_name, extension = os.path.splitext(filename)\n #Replace special characters in filenames - []{}()\n glob_name = re.sub(r'([\\[\\]\\(\\)\\{\\}])', r'[\\g<1>]', base_name)\n\n if extension.lower() == '.shp':\n required_extensions = dict(\n shp='.[sS][hH][pP]', dbf='.[dD][bB][fF]', shx='.[sS][hH][xX]')\n for ext, pattern in required_extensions.iteritems():\n matches = glob.glob(glob_name + pattern)\n if len(matches) == 0:\n msg = ('Expected helper file %s does not exist; a Shapefile '\n 'requires helper files with the following extensions: '\n '%s') % (base_name + \".\" + ext,\n required_extensions.keys())\n raise Exception(msg)\n elif len(matches) > 1:\n msg = ('Multiple helper files for %s exist; they need to be '\n 'distinct by spelling and not just case.') % filename\n raise Exception(msg)\n else:\n files[ext] = matches[0]\n\n matches = glob.glob(glob_name + \".[pP][rR][jJ]\")\n if len(matches) == 1:\n files['prj'] = matches[0]\n elif len(matches) > 1:\n msg = ('Multiple helper files for %s exist; they need to be '\n 'distinct by spelling and not just case.') % filename\n raise Exception(msg)\n\n matches = glob.glob(glob_name + \".[sS][lL][dD]\")\n if len(matches) == 1:\n files['sld'] = matches[0]\n elif len(matches) > 1:\n msg = ('Multiple style files for %s exist; they need to be '\n 'distinct by spelling and not just case.') % filename\n raise Exception(msg)\n\n matches = glob.glob(base_name + \".[xX][mM][lL]\")\n\n # shapefile XML metadata is sometimes named base_name.shp.xml\n # try looking for filename.xml if base_name.xml does not exist\n if len(matches) == 0:\n matches = glob.glob(filename + \".[xX][mM][lL]\")\n\n if len(matches) == 1:\n files['xml'] = matches[0]\n elif len(matches) > 1:\n msg = ('Multiple XML files for %s exist; they need to be '\n 'distinct by spelling and not just case.') % filename\n raise Exception(msg)\n\n return files", "def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag 
= True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == \"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print \"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = 
copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = \"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. 
Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()", "def _get_scores_list(self):\n self.scores = dict()\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n score = open('{0}/{1}/{2}/extract_all.sort.uniq.txt'.format(self.path, subdir, DOCKING_RUN_FILES),'r').read().split()[-1]\n self.scores[subdir] = float(score.strip())\n except:\n pass", "def _generate_tfs_dfs(self) -> dict:\n tfs, dfs = {}, {}\n\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n if doc_path not in tfs:\n tfs[doc_path] = {}\n with open(doc_path, 'r') as f:\n text = f.readline()\n terms = set(text.split())\n for term in terms:\n tfs[doc_path][term] = text.count(term)\n\n if term not in dfs:\n dfs[term] = 1\n else:\n dfs[term] += 1\n\n return tfs, dfs", "def load_data_files() -> Dict[str, Path]:\n default_path = paths.MISCELLANEOUS_DIRECTORY / \"portfolio\"\n custom_exports = (\n get_current_user().preferences.USER_PORTFOLIO_DATA_DIRECTORY / \"optimization\"\n )\n data_files = {}\n for directory in [default_path, custom_exports]:\n for file_type in [\"xlsx\", \"ini\"]:\n for filepath in Path(directory).rglob(f\"*.{file_type}\"):\n if filepath.is_file() and \"example\" not in filepath.name:\n data_files[filepath.name] = filepath\n\n return data_files", "def _buildindex( self ):\n try:\n import ROOT as rt\n except:\n print \"Could not load ROOT\"\n sys.exit(-1)\n \n # sigh. this is a mess\n self.producers = [] # all producer names found in ROOT files\n self.datatypes = [] # all data types\n self.flavors = [] # flavor = hash of string listing set of trees found in a given file\n self.flavor_def = {} # map from flavor to list of tree names\n self.rawdigits_entrymap = {} # only used if file type is raw digits. 
maps rse to (position,wfms) in data tree\n self.rawdigits_tpcindex = {}\n flavor_eventset = {}\n eventsets = []\n events_to_files = {}\n events_to_flavors = {}\n\n # this loop is going into each file in our list and\n # - taking the list of trees in the file and making a has out of their names\n # - this hash is used to define the 'flavor' of the file\n # - we also make a list of events in the tree, labeling each entry with (run,subrun,event) ID\n # - we keep track of such list of entries and group files (and flavors) with the same event list\n # - determine filetype: LArCV or LArLite\n self.filetype = None\n for f in self.larlitefilelist:\n r = rt.TFile(f)\n nfkeys = r.GetListOfKeys().GetEntries()\n\n # now here we parse the type of objects in the ROOT file\n # we are looking to determine three file types supported by pylard\n # (1) larlite (2) larcv (3) rawdigitreader\n trees = []\n for i in range(nfkeys):\n keyname = r.GetListOfKeys().At(i).GetName()\n if keyname==\"larlite_id_tree\":\n found_id_tree = True\n elif \"_tree\" in keyname:\n producer = keyname.split(\"_\")[1]\n dtype = keyname.split(\"_\")[0]\n if producer not in self.producers:\n self.producers.append( producer )\n if dtype not in self.datatypes:\n self.datatypes.append( dtype )\n elif \"rawdigitwriter\" in keyname:\n trees.append( \"rawdigitwriter/RawDigits\" )\n trees.append( \"rawdigitwriter/OpDetWaveforms\" )\n trees.append( \"rawdigitwriter/IndexRawDigits\" )\n trees.append( \"rawdigitwriter/IndexOpDetWfms\" )\n if keyname not in trees:\n trees.append(keyname)\n hashstr = \"\"\n trees.sort()\n for keyname in trees:\n hashstr += keyname +\";\"\n\n # determine filetype from type of keys we see\n is_supported_rootfile = False\n idtreename = None\n if \"larlite_id_tree\" in trees:\n thisfiletype = \"LARLITE\"\n is_supported_rootfile = True\n if \"image2d\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"partroi\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"rawdigitwriter/OpDetWaveforms\" in trees:\n thisfiletype = \"RAWDIGITS\"\n is_supported_rootfile = True\n if not is_supported_rootfile:\n continue\n\n if self.filetype is not None and self.filetype!=thisfiletype:\n print \"Error in parsing filelist: Cannot mix filetypes (LArCV/LArLite/RawDigitTree)\"\n return\n elif self.filetype is None:\n self.filetype = thisfiletype\n \n # now we determine the idtree to use\n if self.filetype==\"LARLITE\":\n idtreename = \"larlite_id_tree\"\n elif self.filetype==\"LARCV\":\n if self.loaded_larcv == False:\n s = time.time()\n import larcv as larcv\n print \"LOADING LARCV: \",time.time()-s,\"secs\"\n self.loaded_larcv = True\n for treename in trees:\n if \"image2d\" in treename:\n if idtreename is None:\n idtreename = treename\n else:\n pass # we only use this if we have to\n if \"partroi\" in treename:\n idtreename = treename # we prefer to use this tree for speed\n break\n elif self.filetype==\"RAWDIGITS\":\n idtreename = \"rawdigitwriter/IndexOpDetWfms\"\n\n if idtreename is None:\n print \"Error: Could not setup a proper ID tree for this file\"\n continue\n\n # now we parse the tree contents. 
define a flavor for it based on all the trees\n # we also get the (run,subrun,event) id for the event\n m = hashlib.md5()\n m.update(hashstr)\n flavor = m.digest()\n if flavor not in self.flavors:\n self.flavors.append( flavor )\n flavor_eventset[flavor] = []\n self.flavor_def[flavor] = hashstr\n if self.filetype==\"LARLITE\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"LARCV\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"RAWDIGITS\":\n idtree = r.Get(idtreename)\n \n eventset = [] # list of events\n for n in range(idtree.GetEntries()):\n idtree.GetEntry(n)\n if self.filetype==\"LARLITE\":\n rse = ( idtree._run_id, idtree._subrun_id, idtree._event_id )\n elif self.filetype==\"LARCV\":\n idbranchname = idtreename.replace(\"_tree\",\"_branch\")\n idbranch = None\n exec(\"idbranch=idtree.%s\"%(idbranchname))\n rse = ( idbranch.run(), idbranch.subrun(), idbranch.event() )\n elif self.filetype==\"RAWDIGITS\":\n rse = ( idtree.idx_run, idtree.idx_subrun, idtree.idx_event )\n self.rawdigits_entrymap[rse] = (idtree.entrystart, idtree.nentries )\n eventset.append(rse)\n if rse not in flavor_eventset[flavor]:\n flavor_eventset[flavor].append( rse )\n else:\n raise ValueError( \"found a repeated run/subrun/event index (%s). what?\"%( str(rse) ) )\n if self.filetype==\"RAWDIGITS\":\n # rawdigits has another tree index for the TPC\n tpcindex = r.Get(\"rawdigitwriter/IndexRawDigits\")\n for n in range(tpcindex.GetEntries()):\n tpcindex.GetEntry(n)\n rse = ( tpcindex.idx_run, tpcindex.idx_subrun, tpcindex.idx_event )\n self.rawdigits_tpcindex[rse] = (tpcindex.entrystart, tpcindex.nentries)\n \n eventset = tuple(eventset)\n if eventset not in events_to_files:\n events_to_files[eventset] = {}\n events_to_flavors[eventset] = []\n eventsets.append( eventset )\n events_to_files[eventset][flavor] = f\n events_to_flavors[eventset].append( flavor )\n del idtree\n r.Close()\n self.parsed = True\n\n # now we take our collection of event lists and\n # - sort the event lists\n # - make lists of files with the same set of events in the order of the sorted event list\n # - for each list we also make a dictionary between (run,subrun,event) index to the entry number\n # - we pick the list with the biggest number of events as the \"official\" file list\n eventsets.sort()\n flavorfiles = {}\n flavorsets = []\n\n flavorset_rse_dict = {}\n flavorset_entry_dict = {}\n for eventset in eventsets:\n events_to_flavors[eventset].sort() # sort the flavors with this event-set\n flavorset = tuple( events_to_flavors[eventset] )\n if flavorset not in flavorfiles:\n flavorfiles[flavorset] = []\n flavorsets.append(flavorset)\n flavorset_rse_dict[flavorset] = {}\n flavorset_entry_dict[flavorset] = {}\n for flavor in flavorset:\n flavorfiles[flavorset].append( events_to_files[eventset][flavor] )\n for rse in eventset:\n ientry = len( flavorset_rse_dict[flavorset] )\n flavorset_rse_dict[flavorset][rse] = ientry\n flavorset_entry_dict[flavorset][ientry] = rse\n\n # look for largest fileset\n maxset = None\n nfiles = 0\n for fset in flavorsets:\n n = len(flavorfiles[fset])\n if n>nfiles:\n nfiles = n\n maxset = fset\n # these are the final file list and event dictionary we want\n self.sorted_filelist = flavorfiles[maxset]\n self.rse_dict = flavorset_rse_dict[maxset]\n self.entry_dict = flavorset_entry_dict[maxset]\n\n # for rawdigits, we also build the entry to data map\n if self.filetype==\"RAWDIGITS\":\n treepos = 0\n treepos_tpc = 0\n for entry in range(len(self.entry_dict)):\n rse = self.entry_dict[entry] \n # update 
OPDET tree\n pos_entries = self.rawdigits_entrymap[rse] # pos is from start of file, nentries is for the event block\n merged_pos_entries = ( treepos, pos_entries[1] )\n treepos += pos_entries[1]\n self.rawdigits_entrymap[rse] = merged_pos_entries # update \n # update TPC tree\n pos_entries = self.rawdigits_tpcindex[rse]\n merged_pos_entries = ( treepos_tpc, pos_entries[1] )\n treepos_tpc += pos_entries[1]\n self.rawdigits_tpcindex[rse] = merged_pos_entries # update", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def read_data_files(self):\n\n for name, snap in zip(self.names, self.snaps):\n # build the very important dictionary:\n key = f'{name}_{snap:03}' # e.g 'MW_000'\n self.galaxies[key] = Galaxy(name, snap, self.path, \n self.usesql, self.ptype, self.stride)\n self.time = self.galaxies[key].time\n\n # bits of minor housekeeping:\n # self.path = self.galaxies[key].filepath # may speed up next search\n self.filenames.append(key)", "def create_dicts(filelist, class_out, **kwargs):\n\n c = TwoLevelCountDict()\n d = TwoLevelCountDict()\n m = TwoLevelCountDict()\n\n def _merge_dicts(tup):\n new_c, new_d, new_m = tup\n _merge_tlcd(c, new_c)\n _merge_tlcd(d, new_d)\n _merge_tlcd(m, new_m)\n print(len(c), len(d), len(m))\n\n pool = Pool(4)\n\n for f in filelist:\n #p = Process(target=_process_file, args=(f))\n pool.apply_async(_process_file, args=[f], callback=_merge_dicts)\n #_process_file(f, c, d, m)\n\n # Close the pool...\n pool.close()\n pool.join()\n\n # Write out the dicitonaries...\n c_f = open(c_path, 'wb')\n d_f = open(d_path, 'wb')\n m_f = open(m_path, 'wb')\n\n pickle.dump(c, c_f)\n pickle.dump(d, d_f)\n pickle.dump(m, m_f)\n c_f.close()", "def CollectDatasets(redirector_str):\n \n \n # uploadDir = 'srv/' for lpcjobqueue shell or TTbarAllHadUproot/ for coffea casa and WinterFell\n \n if 'cmsxrootd' in redirector_str:\n uploadDir = 'srv'\n else:\n uploadDir = 'TTbarAllHadUproot'\n \n uploadDir = ''\n \n filedir = 'nanoAODv9Files/'\n Years = ['UL16', 'UL17', 'UL18']\n VFP = ['preVFP', 'postVFP'] # 
preVFP unavailable in Winterfell for the moment\n # VFP = ['postVFP'] # Only for simple test in WinterFell\n filesets = {} # To be filled and returned by this function\n \n # ---- Before concatenation with +=, lists should be declard ---- # \n \n for y in Years:\n if '16' in y:\n for v in VFP:\n filesets[y+v+'_QCD'] = []\n filesets[y+v+'_TTbar_700_1000'] = []\n filesets[y+v+'_TTbar_1000_Inf'] = []\n # ---- JetHT and SingleMu ---- #\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'preVFP_JetHT'+l+'_Data'] = []\n filesets[y+'preVFP_SingleMu'+l+'_Data'] = []\n for l in ['', 'F', 'G', 'H']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n \n elif '17' in y:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n else:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'A', 'B', 'C', 'D']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n # ---- Loop through years and VFP status, filling the filesets dictionary with the MC file locations from corresponding txt files ---- #\n \n for y in Years:\n if '16' in y:\n for v in VFP:\n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n \n # ---- TTbar ---- #\n ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar700to1000filename) as f:\n ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar1000toInffilename) as f:\n ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n if 'pre' in v:\n if 'Run2016' in filename: #preVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016 \n elif 'post' in v:\n if 'Run2016' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' not in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016\n \n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n# # ---- RS KK Gluon ---- #\n# ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n# ulRSGluonfiles=[]\n# l=0\n# for i in range(1000, 5500, 500):\n# with open(ulRSGluonfilename) as f:\n# ulRSGluonfiles.append([redirector_str + 
s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n# filesets[y+v+'_RSGluon'+str(i)] += ulRSGluonfiles[l]\n# l += 1\n \n else: # UL17 and UL18\n v = VFP[1] # No preVFP after 2016 Run vertex problem was fixed\n \n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n\n# # ---- TTbar ---- #\n# ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar700to1000filename) as f:\n# ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar1000toInffilename) as f:\n# ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n# filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist: \n if 'Run2017' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2017\n elif 'Run2018' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2018 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2018\n\n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n # ---- RS KK Gluon ---- #\n ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n ulRSGluonfiles=[]\n l=0\n for i in range(1000, 5500, 500):\n with open(ulRSGluonfilename) as f:\n ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_RSGluon'+str(i)] = ulRSGluonfiles[l]\n l += 1\n \n \n # ---- JetHT Eras---- #\n \n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in 
e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'JetHT/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'JetHT/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_JetHTF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_JetHTF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'JetHT/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'JetHT/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'JetHT/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTD_Data'] += jetdatafiles2018d\n \n\n \n # ---- Single Muon ---- #\n datafilelist = os.listdir(filedir + 'SingleMu/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in 
b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'SingleMu/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'SingleMu/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_SingleMuF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_SingleMuF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'SingleMu/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'SingleMu/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'SingleMu/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'SingleMu/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n 
filesets['UL18postVFP_SingleMuB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuD_Data'] += jetdatafiles2018d\n \n \n # print(filesets['UL16postVFP_JetHT_Data'])\n # print('==========================================================================================================')\n # print(filesets['UL16postVFP_TTbar'])\n \n return filesets", "def make_files(self):\n return []", "def map(self, clean=False):\n self.files = {}\n curs = DatabaseManager.Instance().cursor\n if clean:\n curs.execute(\"DELETE FROM DestinationsFilesList WHERE `destinationName`=%s;\", (self.name, ))\n DatabaseManager.Instance().connector.commit()\n for root, directory, files in os.walk(self.path):\n\n for file_ in files:\n path = os.path.join(root, file_)\n relative_path = self.get_relative_path(path)\n if self.filter.test(File(path)):\n fwh = None\n if not clean:\n sql = \"SELECT * FROM DestinationsFilesList WHERE `path`=%s AND `destinationName`=%s LIMIT 1;\"\n curs.execute(sql, (relative_path, self.name))\n res = curs.fetchone()\n # curs.fetchall()\n if res is not None:\n # file already in DB, so use it\n fwh = FileWithHash.from_sql_query(res)\n if fwh is None:\n fwh = FileWithHash(path, self.name, None, relative_path)\n sql2 = \"INSERT INTO DestinationsFilesList (`hash`, `path`, `destinationName`) VALUES(%s, %s, %s);\"\n # self.logger.info(\"%s add: %s\", [self.name, relative_path]\n curs.execute(sql2, (fwh.hash, relative_path, fwh.destination_name))\n DatabaseManager.Instance().connector.commit()\n self.files[fwh.hash] = fwh", "def extract_folder_file_structure() -> Dict[str, List[str]]:\n folders_and_files = {}\n for path_to_folder in glob.glob(f\"{ZULIPTERMINAL}/**/\", recursive=True):\n complete_directory_path = Path(path_to_folder)\n if complete_directory_path.name in FOLDERS_TO_EXCLUDE:\n continue\n relative_directory_path = complete_directory_path.relative_to(ROOT_DIRECTORY)\n if str(relative_directory_path) not in DESC_FOR_NO_FILE_FOLDERS:\n files_in_directory = [\n file.name\n for file in complete_directory_path.glob(\"*.py\")\n if file.name != \"__init__.py\"\n ]\n folders_and_files[str(relative_directory_path)] = files_in_directory\n return folders_and_files", "def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = 
str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)", "def collect_meta() -> Dict[str, Any]:\n out = {}\n for integrations_dir, meta in all_integrations():\n integration_name = integrations_dir.name\n out[integration_name] = meta\n # print(f\"Processed meta for integration {integration_name}\")\n return out", "def main(self, verbose=0):\n indepdict=self.scan_for_loop(self.indeploop)\n pegdict1 = self.scan_for_loop(self.pegloop1)\n pegdict2 = self.scan_for_loop(self.pegloop2)\n if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0:\n return dict()\n alldict = dict(indepdict)\n alldict.update(pegdict1)\n alldict.update(pegdict2)\n indepcomb=self.get_combo_list(indepdict, 0)\n pegcomb1=self.get_combo_list(pegdict1, 1)\n pegcomb2=self.get_combo_list(pegdict2, 1)\n allcombs = self.combine_three_combo_lists(indepcomb, pegcomb1, pegcomb2)\n datasets = self.prepare_looped_datasets(alldict, allcombs)\n createdfiles = self.create_input_files(datasets)\n if verbose == 1:\n self.print_list(indepcomb)\n self.print_list(pegcomb1)\n self.print_list(pegcomb2)\n self.print_list(allcombs)\n for datakey in datasets:\n self.print_list(datasets[datakey])\n return createdfiles", "def _getfilenames(self):\n\n # Set up the path and file prefix depending on the filetype.\n if self._filetype == 'nightwatch':\n fileprefix = 'qcframe'\n\n if self._location == 'nersc':\n prefix = '/global/project/projectdirs/desi/spectro/nightwatch/kpno'\n elif self._location == 'kpno':\n prefix = '/exposures/desi' # not correct path!\n else:\n raise ValueError('Unknown location {}'.format(self._location))\n elif self._filetype == 'redux':\n fileprefix = 'sframe'\n\n if self._location == 'nersc':\n prefix = '/global/project/projectdirs/desi/spectro/redux/daily/exposures'\n elif self._location == 'kpno':\n prefix = '/exposures/desi' # not correct path!\n else:\n raise ValueError('Unknown location {}'.format(self._location))\n else:\n raise ValueError('Unknown file type {}'.format(self._filetype))\n\n # Find the exposures files.\n exfiles = {}\n for ex in self._exposures:\n folder = 
'{}/{}/{:08d}'.format(prefix, self._date, ex)\n files = sorted(glob('{}/{}*.fits'.format(folder, fileprefix)))\n exfiles[ex] = files\n\n return exfiles", "def reformat_files(cls, files):\n\n common_path = None # src base\n out_files = []\n for fil in files:\n if isinstance(fil, string_types):\n if len(files) == 1:\n common_prefix = op.dirname(fil) + '/'\n elif common_path is None:\n common_prefix = op.commonprefix(files)\n if fil[len(common_prefix):][0] != '/':\n common_path = op.dirname(common_prefix) + '/'\n else:\n common_path = common_prefix\n out_files.append((fil[len(common_path):], fil, dict()))\n elif not isinstance(fil, collections.Iterable):\n raise ValueError(\"Unexpected format: %s\" % str(fil))\n elif len(fil) == 2: # assume src, dest\n out_files.append((fil[0], fil[1], dict()))\n elif len(fil) == 3:\n out_files.append(fil)\n else:\n raise ValueError(\"Unexpected format: %s\" % str(fil))\n return out_files", "def ppt_files_to_dict(self):\n if len(self.ppt_path_list) == 0:\n return\n\n for file_path in self.ppt_path_list:\n self.ppt_file_to_dict(file_path)", "def scan(root: str, config: dict) -> dict:\n\n extensions = config['extensions']\n exceptions = config['exceptions']\n\n # get list of directories, with their contents (files and sub-directories)\n dirs_by_path = {\n get_path_from_common_root(dir_path, root): (\n dir_path,\n dir_names,\n [name for name in file_names if is_valid_file_name(name, config)]\n )\n for dir_path, dir_names, file_names in os.walk(root)\n }\n\n dirs_by_path = {key: value for key, value in dirs_by_path.items() if value[1] or value[2]}\n\n # get list of file paths\n file_path_list = [\n (file_dir, file_name)\n for file_dir, _, file_names in dirs_by_path.values()\n for file_name in file_names\n ]\n\n files_by_path = {}\n # todo: put below loop contents into function, used in above list comprehension\n for file_dir, file_name_with_extension in file_path_list:\n # todo: remove code file definition from here to allow this to be reused for non-code related files\n # todo: refactor to use inheritable File class instead of named tuple, to be reused for non-code files\n\n if file_name_with_extension in exceptions:\n continue\n\n file = CodeFile(\n imports=[],\n exports=[],\n local_params=[],\n blocks=[],\n dir=file_dir,\n name=file_name_with_extension.rsplit('.', 1)[0],\n extension=file_name_with_extension.rsplit('.', 1)[1]\n )\n\n parse_file(file, config)\n file_no_dupes = remove_duplicate_entries(file)\n\n dir_key = get_path_from_common_root(file_dir, root)\n if dir_key not in files_by_path:\n files_by_path[dir_key] = []\n\n files_by_path[dir_key].append(file_no_dupes)\n\n return {'dirs_by_path': dirs_by_path, 'files_by_path': files_by_path,\n 'starting_point': get_path_from_common_root(root, root)}", "def build():\n for root, dirs, files in os.walk(IN_PATH):\n for filename in files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', 
encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))", "def fileAgglomeration(self, dataset: list):\n result = dict()\n\n startTimeForAgglomeration = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n print(\"CPU Model,Index, Filename, Elapsed Time\")\n for idx, e in enumerate(dataset):\n # CPU TIME\n startTime = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n result[idx] = self._count_occurrences(filename=e)\n endTime = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n\n # CPU Model, Index, Filename, Time Taken Processing File\n fileName = e.split(\"/\")[-1]\n print(f\"{self.cpuModel},{idx + 1},{fileName},{endTime - startTime}\") # Logger ^ Markdown\n\n endTimeForAgglomeration = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n print(\n f\"Total Files Aggregated: {len(dataset)} and total {endTimeForAgglomeration - startTimeForAgglomeration} seconds elapsed.\")\n\n return result", "def load_groups(files):\n groups = defaultdict(list)\n for f in files:\n d = np.load(f, allow_pickle=True)\n gkey = to_group_key(d['args'].item()._get_kwargs())\n groups[gkey].append((f, d))\n return groups", "def prepare_fastq(Fastq_Root=\"2.Fastq/\", ):\n fastqs = glob.glob(Fastq_Root + \"*.fastq\")\n data = {}\n for fq in fastqs:\n s = os.path.split(fq)[1]\n s = s.replace(\".fastq\", \"\")\n if s.endswith(\"_1\"):\n sample = s.replace(\"_1\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][0] = fq\n if s.endswith(\"_2\"):\n sample = s.replace(\"_2\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][1] = fq\n if not s.endswith(\"_1\") and not s.endswith(\"_2\"):\n data[s] = [fq]\n return data", "def getAllForImages(self):\n imageDict = {}\n for id, name in self.getAll().items():\n imageDict[id] = {}\n imageDict[id][\"name\"] = name\n imageDict[id][\"filename\"] = \"The_Steamer_Great_Western_small.jpg\"\n\n return imageDict", "def common_files(self):\n return [\n (\"root_dir\", self.root_dir),\n (\"smrtlink_dir\", self.smrtlink_job_dir),\n (\"validation_report_csv\", self.validation_report_csv),\n (\"polymerase_readlength_csv\", self.polymerase_readlength_csv),\n (\"ccs_readlength_csv\", self.ccs_readlength_csv),\n (\"flnc_readlength_csv\", self.flnc_readlength_csv),\n (\"consensus_isoforms_readlength_csv\", self.consensus_isoforms_readlength_csv),\n (\"hq_readlength_csv\", self.hq_readlength_csv),\n (\"lq_readlength_csv\", self.lq_readlength_csv),\n (\"isoseq_flnc_fasta\", self.isoseq_flnc_fa),\n (\"consensus_isoforms_fasta\", self.consensus_isoforms_fa),\n (\"hq_isoforms_fasta\", self.hq_isoforms_fa)\n ]", "def process_data_group(folder:Path, type:str, light:bool = False) -> dict:\n\n if type == dm.Delivery:\n data_folder = folder / 'data'\n else:\n data_folder = folder\n\n # check for non-existent or empty 
folder\n if not data_folder.exists():\n raise FileNotFoundError\n try:\n next((data_folder).glob(\"**/*\"))\n except StopIteration:\n # folder is empty can't process it\n raise FileNotFoundError\n\n # Get file sizes, last modified dates, and names to count,\n # sum size, and hash the file data provided\n file_sizes, file_modified_dates, file_metamodified_dates, file_names = zip(\n *[\n (f.stat().st_size, f.stat().st_mtime, f.stat().st_ctime, f)\n for f in (data_folder).glob(\"**/*\")\n if f.is_file() and f.name != 'receipt.rst'\n ]\n )\n\n last_modified = datetime.fromtimestamp(\n max(max(file_modified_dates),\n max(file_metamodified_dates)))\n\n # Hash the files in the delivery\n if light:\n folder_hash = 'skipped'\n else:\n folder_hash = hash_files(file_names)\n\n dg = {\n 'name' : folder.name,\n 'type' : type.__name__,\n 'last_update' : datetime.now(),\n 'size' : sum(file_sizes),\n 'num_files' : len(file_sizes),\n 'group_hash' : folder_hash,\n 'group_last_modified' : last_modified,\n }\n\n return dg", "def get_files_io():\n if GC.conf['general']['training']:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'train.txt'),\n 'new': os.path.join(COOKED_DATA, 'train_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'train_norm.txt'),\n 'manu': os.path.join(RAW_DATA, 'others', 'temp_updt_manu.txt'),\n 'labels': os.path.join(TRAIN_DATA, 'train_norm.txt_labels.pkl'),\n 'segll': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_deeplog.pkl'),\n 'struct': os.path.join(TRAIN_DATA, 'train_norm.txt_structured.csv'),\n 'output': TRAIN_DATA\n }\n else:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'test.txt'),\n 'new': os.path.join(COOKED_DATA, 'test_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'test_norm.txt'),\n 'labels': os.path.join(TEST_DATA, 'test_norm.txt_labels.pkl'),\n 'segll': os.path.join(TEST_DATA, 'test_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TEST_DATA, 'test_norm.txt_seginf_deeplog.pkl'),\n 'map_norm_raw': os.path.join(TEST_DATA, 'map_norm_raw.pkl'),\n 'map_norm_rcv': os.path.join(TEST_DATA, 'map_norm_rcv.pkl'),\n 'norm_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt'),\n 'struct': os.path.join(TEST_DATA, 'test_norm.txt_structured.csv'),\n 'struct_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt_structured.csv'),\n 'top': os.path.join(TEST_DATA, 'analysis_summary_top.txt'),\n 'sum': os.path.join(TEST_DATA, 'analysis_summary.csv'),\n 'rst_llab': os.path.join(TEST_DATA, 'results_loglab.csv'),\n 'rst_dlog': os.path.join(TEST_DATA, 'results_deeplog.txt'),\n 'rst_llzr': os.path.join(TEST_DATA, 'results_loglizer.csv'),\n 'dbg': os.path.join(TEST_DATA, 'debug.csv'),\n 'output': TEST_DATA\n }\n return files_zip", "def combine(tgt_files):\n mapped_files = {}\n for tgt_file in tgt_files:\n f_key, f_map = map_fes(tgt_file)\n\n if f_key is not None:\n mapped_files[f_key] = f_map\n\n combo_dict = {}\n for key, cur_dict in sorted(mapped_files.items()):\n logger.debug(\"Processing timestep '{}'\".format(key))\n combo_dict.update(cur_dict)\n return combo_dict", "def to_build_file_globs(self, build_patterns: Iterable[str]) -> set[str]:\n return {\n os.path.join(f, pattern)\n for pattern in build_patterns\n for f in recursive_dirname(self.directory)\n }", "def _read_output_files(self):\n self.manage = {} # Empty the dictionary matching phrases\n self.manage['spin'] = (re.compile(' *net spin of'), self._read_spin)\n self.manage['nelect'] = (re.compile(' *number of electrons'), self._read_nelect)\n 
self.manage['cellcontents'] = (re.compile(' *Unit Cell'), self._read_cellcontents)\n self.manage['pspots'] = (re.compile(' *Files used for pseudopotentials:'), self._read_pspot)\n self.manage['masses'] = (re.compile(' *Mass of species in AMU'), self._read_masses)\n self.manage['kpoints'] = (re.compile(' *Number of kpoints used'), self._read_kpoints)\n self.manage['kpoint_grid'] = (re.compile(' *MP grid size for SCF'), self._read_kpoint_grid)\n self.manage['finalenergy'] = (re.compile(' *Final energy, E'), self._read_energies)\n self.manage['finalenergy2'] = (re.compile('Final energy ='), self._read_energies2)\n self.manage['finalenergy3'] = (re.compile('Dispersion corrected final energy'), self._read_energies3)\n self.manage['energy_cutoff'] = (re.compile(' *plane wave basis set cut'), self._read_energy_cutoff)\n self.manage['nbands'] = (re.compile(' *number of bands'), self._read_nbands)\n self.manage['pressure'] = (re.compile(' *\\* *Pressure: '), self._read_external_pressure)\n self.manage['opticalDielectric'] = (re.compile(' *Optical Permittivity'), self._read_dielectric)\n self.manage['bornCharges'] = (re.compile(' *Born Effective Charges'), self._read_born_charges)\n # For the .phonon file\n self.manage['frequency'] = (re.compile(' q-pt= 1 0.000000 0.000000 0.000000 1.0000000000 *$'), self._read_frequencies)\n self.manage['nbranches'] = (re.compile(' Number of branches'), self._read_nbranches)\n for f in self._outputfiles:\n self._read_output_file(f)\n return", "def generate_merged_file(json_path:str) -> dict:\n filenames = os.listdir(json_path)\n content_map = defaultdict(dict)\n\n for filename in filenames:\n with open(os.path.join(json_path, filename)) as json_file:\n data = json.load(json_file)\n patientID = str(data['patient']['PatientID'])\n studyUID = str(data['study']['StudyInstanceUID'])\n studyDetails = data['study']\n seriesDetails = data['series']\n seriesInstanceUID = data['series']['SeriesInstanceUID']\n\n if(studyUID not in content_map[patientID]) :\n content_map[patientID][studyUID]=studyDetails\n \n if('Series' not in content_map[patientID][studyUID] ):\n content_map[patientID][studyUID]['Series']={}\n content_map[patientID][studyUID]['Series'][seriesInstanceUID]=seriesDetails\n content_map[patientID][studyUID]['Series'][seriesInstanceUID]['path'] = data['path']\n content_map[patientID][studyUID]['Series'][seriesInstanceUID]['files'] = data['files']\n\n return content_map", "def create_dictionary_of_old_and_new_paths(raw_dir: Path, bids_dir: Path, exclude_fieldmaps: bool) -> dict:\n\n old_and_new_paths = {}\n old_paths = list(raw_dir.rglob(\"*\"))\n print(f\"Sorting {len(old_paths)} paths into a dictionary.\") \n\n def task_name_of(path_to_func_or_json):\n \"\"\"\n Returns the task name of a func or json file. This function OVERWRITES the function\n \"task_name_of\" that we imported at the top of this script. BUT! 
It only overwrites it\n HERE, in \"create_dictionary_of_old_and_new_paths\".\n \"\"\"\n\n list_of_lines_containing_raw_subject_info = []\n for path in old_paths:\n if filetype_of(path) == \"subject info\":\n list_of_lines_containing_raw_subject_info = path.read_text().splitlines()\n break\n\n for line in list_of_lines_containing_raw_subject_info:\n if path_to_func_or_json.stem in line:\n return line.split(\"z\")[1]\n\n\n for old_path in old_paths:\n\n new_path = old_path\n\n if filetype_of(old_path.with_suffix(\".nii\")) == \"anat\" and acquisition_number_of(old_path) == \"14\":\n new_path = bids_dir / f\"sub-{subject_id_of(old_path)}\" / \"anat\" / f\"sub-{subject_id_of(old_path)}_T1w{old_path.suffix}\"\n\n elif filetype_of(old_path.with_suffix(\".nii\")) == \"func\" and acquisition_number_of(old_path) != \"02\":\n new_path = bids_dir / f\"sub-{subject_id_of(old_path)}\" / \"func\" / f\"sub-{subject_id_of(old_path)}_task-{task_name_of(old_path)}_acq-{acquisition_number_of(old_path)}_bold{old_path.suffix}\"\n\n elif filetype_of(old_path.with_suffix(\".nii\")) == \"fieldmap\" and not exclude_fieldmaps:\n old_affix = old_path.stem.split(\"_\")[-1]\n new_affix = \"phasediff\"\n if old_affix == \"e1\":\n new_affix = \"magnitude1\"\n if old_affix == \"e2\":\n new_affix = \"magnitude2\"\n new_path = bids_dir / f\"sub-{subject_id_of(old_path)}\" / \"fmap\" / f\"sub-{subject_id_of(old_path)}_{new_affix}{old_path.suffix}\"\n\n old_and_new_paths[old_path] = new_path\n\n print(\"Paths sorted.\")\n\n return old_and_new_paths", "def process_group(directory: str, files: dict, channel: str, year: str) -> dict:\n if len(files) == 0:\n raise Exception('empty file list for directory {}'.format(directory)) + 1\n\n dataframes = {}\n for name, ifile in files.items():\n # equivalent of hadding\n update_dfs = uproot.pandas.iterate(ifile, f'{channel}_tree')\n current_dfs = []\n for update_df in update_dfs:\n update_df.fillna(-999, inplace=True)\n current_dfs.append(update_df)\n \n if len(current_dfs) > 0:\n dataframes[name] = pd.concat(current_dfs)\n\n dataframes['metadata'] = pd.DataFrame({'channel': [channel], 'year': [year]})\n return dataframes", "def parse_all_controlfiles(dirs: List[Path]) -> Dict[str, BinaryPackage]:\n pkgs = {}\n for path in dirs:\n if not Path(path / \"debian/control\").exists():\n continue\n pkgs.update(parse_controlfile(path))\n return pkgs", "def allinfo(self, *path):\n files = self.listfiles(*path)\n dic = {}\n for filename in files:\n dic[filename] = self.info(*filename)\n return dic", "def build(self) -> None:\n def do_process(fname) -> bool:\n for sfx in skip_suffixes:\n if fname.endswith(sfx):\n return False\n return True\n\n for dirpath, _, fnames in os.walk(self.template_dir):\n for fname in fnames:\n if do_process(fname):\n self.process(dirpath, fname)", "def get_result_files(self):\n name_pattern = \"{mapper}.{ngs_library.name}\"\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"), ext=EXT_VALUES\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", \"{mapper}.{ngs_library.name}.{ext}\"),\n ext=(\n \"log\",\n \"conda_info.txt\",\n \"conda_list.txt\",\n \"log.md5\",\n \"conda_info.txt.md5\",\n \"conda_list.txt.md5\",\n ),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n 
os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt.md5\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html.md5\"\n )\n )\n\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n if ngs_library.name in self.ngs_library_to_kit:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n suffix = (\n \"_long\"\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"PacBio\", \"ONP\")\n else \"\"\n )\n # Per-sample target coverage report.\n yield from expand(\n os.path.join(\n \"output\", name_pattern, \"report\", \"cov_qc\", name_pattern + \".{ext}\"\n ),\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n yield \"output/target_cov_report/out/target_cov_report.txt\"\n yield \"output/target_cov_report/out/target_cov_report.txt.md5\"\n if (\n self.config[\"picard_hs_metrics\"][\"path_targets_interval_list\"]\n and self.config[\"picard_hs_metrics\"][\"path_baits_interval_list\"]\n ):\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt.md5\"\n )\n )\n if self.config[\"compute_coverage_bed\"]:\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"report\", \"coverage\", name_pattern + \"{ext}\"),\n ext=(\".bed.gz\", \".bed.gz.tbi\"),\n )\n else:\n print(\n \"Genome-wide coverage BED generation disabled\", file=sys.stderr\n ) # pragma: no cover", "def manifest(self):\n yield self._meta\n for dir_key, meta in self._walk_dir_meta():\n yield {'logical_key': dir_key, 'meta': meta}\n for logical_key, entry in self.walk():\n yield {'logical_key': logical_key, **entry.as_dict()}", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n for charge_selection in self.charge_selections:\n key_dir = getKey(process_name, charge_selection)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.configDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.configDir, dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n ##print \"self.dirs = \", self.dirs\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == 
dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_name, sample_info, self.max_files_per_job, self.debug)\n \n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name)) \n\n is_mc = (sample_info[\"type\"] == \"mc\")\n lumi_scale = 1. if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"]\n apply_genWeight = sample_info[\"apply_genWeight\"] if (is_mc and \"apply_genWeight\" in sample_info.keys()) else False\n sample_category = sample_info[\"sample_category\"]\n triggers = sample_info[\"triggers\"]\n apply_trigger_bits = (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n\n inputFileList = inputFileLists[sample_name]\n for jobId in inputFileList.keys():\n if central_or_shift != \"central\" and not is_mc:\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttH\") and sample_category != \"signal\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttW\") and sample_category != \"TTW\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttZ\") and sample_category != \"TTZ\":\n continue\n\n # build config files for executing analysis code\n key_dir = getKey(process_name, charge_selection)\n key_analyze_job = getKey(process_name, charge_selection, central_or_shift, jobId)\n\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n print \"Warning: ntupleFiles['%s'] = %s --> skipping job !!\" % (key_job, ntupleFiles)\n continue\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : os.path.join(self.dirs[key_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%s_%i_cfg.py\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'histogramFile' : os.path.join(self.dirs[key_dir][DKEY_HIST], \"%s_%s_%s_%i.root\" % \\\n (process_name, charge_selection, central_or_shift, jobId)),\n 'logFile' : os.path.join(self.dirs[key_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%s_%i.log\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'sample_category' : sample_category,\n 'triggers' : sample_info[\"triggers\"],\n 'charge_selection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_denominator' : self.hadTau_selection_denominator,\n 'hadTau_selections_numerator' : self.hadTau_selections_numerator,\n 'absEtaBins' : self.absEtaBins,\n ##'use_HIP_mitigation_mediumMuonId' : sample_info[\"use_HIP_mitigation_mediumMuonId\"],\n 
'use_HIP_mitigation_mediumMuonId' : True,\n 'is_mc' : is_mc,\n 'central_or_shift' : central_or_shift,\n 'lumi_scale' : 1. if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"],\n 'apply_genWeight' : sample_info[\"genWeight\"] if (is_mc and \"genWeight\" in sample_info.keys()) else False,\n 'apply_trigger_bits' : (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job])\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1 = getKey(process_name, charge_selection)\n if not key_hadd_stage1 in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage1_%s_%s_%s.root\" % \\\n (self.channel, process_name, charge_selection))\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1 = getKey(process_name, charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n if not key_hadd_stage2 in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2].append(self.outputFile_hadd_stage1[key_hadd_stage1])\n self.outputFile_hadd_stage2[key_hadd_stage2] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage2_%s_%s.root\" % \\\n (self.channel, charge_selection))\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n key_comp_jetToTauFakeRate_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_cfg.py\" % charge_selection),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s.log\" % charge_selection),\n 'looseRegion' : \"jetToTauFakeRate_%s/denominator/\" % charge_selection,\n 'tightRegion' : \"jetToTauFakeRate_%s/numerator/\" % charge_selection,\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n self.targets.append(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile'])\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_makePlots_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n 
self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"denominator\")\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_denominator_%s_cfg.py\" % (self.channel, charge_selection, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/denominator/%s\" % (charge_selection, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"numerator\", hadTau_selection_numerator)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_numerator_%s_%s_cfg.py\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_numerator_%s_%s.png\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/numerator/%s/%s\" % (charge_selection, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile)\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n \n logging.info(\"Done\")", "def build_basenames():\r\n dict = {}\r\n with open(STREETS_FILE) as file:\r\n for line in file:\r\n dict[line.strip()] = True\r\n return dict", "def _collect_entries(rootdir: str, basedir: str):\n\n files = []\n dirs = []\n\n for entry in os.listdir(os.path.join(rootdir, basedir)):\n rel_path = os.path.join(basedir, entry)\n full_path = os.path.join(rootdir, rel_path)\n isdir = os.path.isdir(full_path)\n if isdir and (rel_path in ('./.git', './.pytest_cache') or entry == '__pycache__'):\n continue\n\n st 
= os.stat(full_path, follow_symlinks=False)\n\n (dirs if isdir else files).append((rel_path, dict(isdir=isdir, path=rel_path, size=(0 if isdir else st.st_size),\n mode=st.st_mode, omode=f'{st.st_mode:04o}',\n mtime=int(st.st_mtime))))\n\n for rel_path, entry in sorted(dirs):\n yield entry\n yield from _collect_entries(rootdir, rel_path)\n\n for _, entry in sorted(files):\n yield entry", "def collect_files(self):\n self.files = []\n for bundle in self.bundles:\n bundle.init_build(self, self.builder)\n bundle_files = bundle.prepare()\n self.files.extend(bundle_files)\n return self", "def compute_products(self):\r\n src_to_classfiles = defaultdict(list)\r\n for pcd_entry in self.pcd_entries:\r\n srcfile = pcd_entry[1]\r\n # In the file classes are represented with slashes, not dots. E.g., com/foo/bar/Baz.\r\n src_to_classfiles[srcfile].append(pcd_entry[0] + '.class')\r\n return src_to_classfiles" ]
[ "0.7275469", "0.71176326", "0.7100193", "0.70664155", "0.7045975", "0.70392615", "0.6858053", "0.6840117", "0.6648288", "0.66272867", "0.64521015", "0.6448374", "0.6421026", "0.6416618", "0.63611877", "0.6361177", "0.63366175", "0.6319295", "0.6307999", "0.62980103", "0.62935305", "0.6249918", "0.6248881", "0.61830884", "0.6148028", "0.6143925", "0.61077875", "0.6105653", "0.60510904", "0.6050868", "0.6036202", "0.60220474", "0.60170853", "0.59976494", "0.59661347", "0.5960784", "0.595487", "0.59200996", "0.59124464", "0.59124184", "0.5904833", "0.5900824", "0.5895679", "0.5891648", "0.58679336", "0.5859436", "0.58387077", "0.58373505", "0.5831541", "0.5826944", "0.5819327", "0.5808121", "0.5807", "0.5792781", "0.5790345", "0.57876915", "0.57827246", "0.5781419", "0.5774129", "0.5769643", "0.5764633", "0.5761963", "0.5758661", "0.5757562", "0.574111", "0.57309353", "0.57168734", "0.5715816", "0.571339", "0.57132095", "0.57129145", "0.57047707", "0.5704682", "0.5703541", "0.5703242", "0.56972134", "0.56968737", "0.5695636", "0.56848216", "0.56640655", "0.56569517", "0.5653277", "0.5652911", "0.56522775", "0.56420976", "0.56379867", "0.56344944", "0.56300366", "0.56298155", "0.5629092", "0.56272817", "0.56229943", "0.5602331", "0.55975163", "0.559474", "0.5594213", "0.55876815", "0.55875593", "0.55864155", "0.5583011" ]
0.66944414
8
For the given grouping, convert ROOT files into DataFrames, merging groups together. Return a dictionary mapping file names to DataFrames.
def process_group(directory: str, files: dict, channel: str, year: str) -> dict:
    if len(files) == 0:
        raise Exception('empty file list for directory {}'.format(directory))

    dataframes = {}
    for name, ifile in files.items():
        # equivalent of hadding: read the tree in chunks and concatenate them
        update_dfs = uproot.pandas.iterate(ifile, f'{channel}_tree')
        current_dfs = []
        for update_df in update_dfs:
            update_df.fillna(-999, inplace=True)
            current_dfs.append(update_df)
        if len(current_dfs) > 0:
            dataframes[name] = pd.concat(current_dfs)

    dataframes['metadata'] = pd.DataFrame({'channel': [channel], 'year': [year]})
    return dataframes
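A minimal usage sketch for process_group as defined above (illustrative only: the file paths, channel, and year are hypothetical assumptions, not values from this dataset row, and the function relies on the uproot 3.x pandas interface):

import uproot      # uproot 3.x, which provides uproot.pandas.iterate
import pandas as pd

# Hypothetical skimmed ROOT files, one per process name.
files = {
    'embedded': 'output/embedded_2018.root',
    'TTT': 'output/TTT_2018.root',
}
dataframes = process_group('output', files, channel='mt', year='2018')
print(dataframes['metadata'])  # one-row DataFrame recording the channel and year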
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_processed_data(self, group_directory):\n processed_dir = [x for x in group_directory.iterdir()\n if x.is_dir() and 'processed' in x.name][0]\n\n task_dirs = [x for x in processed_dir.iterdir()\n if x.is_dir() and 'task' in x.name]\n\n files = dict()\n for task in task_dirs:\n task_camera_dirs = [x for x in task.iterdir()\n if x.is_dir() and 'pc' in x.name]\n\n task_frame_files = list()\n if task_camera_dirs:\n task_frame_files = dict()\n for camera_dir in task_camera_dirs:\n task_frame_files[camera_dir.name] = [x for x in camera_dir.iterdir()\n if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES]\n\n for camera, frame_files in task_frame_files.items():\n for frame_file in frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if task.name not in files:\n files[task.name] = dict()\n\n if frame not in files[task.name]:\n files[task.name][frame] = dict()\n\n files[task.name][frame][camera] = frame_file\n\n else:\n task_frame_files = [x for x in task.iterdir()\n if not x.is_dir()\n and x.suffix in VALID_OUTPUT_FILE_TYPES]\n\n for frame_file in task_frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if task.name not in files:\n files[task.name] = dict()\n files[task.name][frame] = frame_file\n\n return files", "def _load_group_data(directory='', file_name='', df=True):\n\n # check if folder exists with experiment name\n if os.path.isdir(directory) is False:\n print 'making new directory to save data'\n os.mkdir(directory)\n \n # all files in directory\n files = os.listdir(directory)\n\n # if data file already exists\n if file_name in files:\n print 'group data found:', file_name\n\n # if data stored as pandas dataframe\n if df:\n # load data\n print directory+file_name\n group_data = pd.read_pickle(directory+file_name)\n print 'group data loaded'\n\n # if stored as dictionary\n else:\n # load data\n with open(directory+file_name, 'rb') as pkl_file:\n group_data= pickle.load(pkl_file)\n print 'group data loaded'\n\n # otherwise create data structure\n else:\n # data organized as {frequency}{syn distance}{number of synapses}{polarity}[trial]{data type}{tree}[section][segment][spikes]\n print 'no group data found'\n if df:\n group_data = pd.DataFrame()\n else:\n group_data= {}\n\n return group_data", "def get_clean_data(self, group_directory):\n clean_dir = [x for x in group_directory.iterdir()\n if x.is_dir() and 'clean' in x.name][0]\n\n task_dirs = [x for x in clean_dir.iterdir()\n if x.is_dir() and 'task' in x.name]\n\n files = dict()\n for task in task_dirs:\n task_camera_directory = [x for x in task.iterdir()\n if x.is_dir() and 'pc' in x.name]\n\n camera_files = dict()\n for camera_directory in task_camera_directory:\n camera_id = camera_directory.name\n camera_frame_files = [x for x in camera_directory.iterdir()\n if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES]\n for frame_file in camera_frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if camera_directory.name not in camera_files:\n camera_files[camera_id] = dict()\n\n camera_files[camera_id][frame] = frame_file\n files[task.name] = camera_files\n\n return files", "def split(df, group):\n\n data = namedtuple(\"data\", [\"filename\", \"object\"]) #initiate \"data\" tyoe\n gb = df.groupby(group) #group df by group attribute\n return [\n data(filename, gb.get_group(x))\n for filename, x in zip(gb.groups.keys(), gb.groups)\n ]", "def get_data(paths, df_names, categorical_feats, groupby=None, 
exclude_classes=[], rel_cols=None, sep=\",\"):\n\n def _load_data(path, sep=sep):\n \"\"\"small function to load according to the dataformat. (excel or csv)\"\"\"\n filename, file_extension = os.path.splitext(path)\n\n if file_extension in [\".csv\", \".tsv\"]:\n df = pd.read_csv(path, index_col=0, sep=sep)\n else:\n df = pd.read_excel(path, index_col=0)\n\n return df\n\n # initialize list to store dataframes in\n dfs = []\n\n # Handle single path input\n if groupby and (len(paths) == 1 or isinstance(paths, str)):\n\n # load data depending on if the single path is given in a list of as string\n if isinstance(paths, str):\n data = _load_data(paths, sep)\n elif isinstance(paths, list):\n data = _load_data(*paths, sep)\n else:\n raise ValueError(\"It seems like the input was a single path. Please input path as string or inside a list.\")\n\n grouping = data.groupby(groupby)\n\n # split dataframe groups and create a list with all dataframes\n for name, grp in grouping:\n # skip class if it should be excluded\n if name in exclude_classes:\n continue\n\n df = grouping.get_group(name)[::]\n\n # consider all columns as relevant is no rel_cols given.\n if rel_cols is None:\n rel_cols = list(df)\n\n # consider the relevant columns\n dfs.append(df[rel_cols])\n\n # Handle multiple paths input\n elif len(paths) > 1:\n for path in paths:\n df = _load_data(path)\n dfs.append(df)\n\n return DataCollection(dfs, df_names, categorical_feats)", "def generate_group_summary_table(self, groups, group_names=None):\n output = {\n 'patient_patches': {},\n 'slide_patches': {},\n 'patient_slides': {},\n }\n groups['chunks'].sort(key=lambda chunk: chunk['id'])\n category_names = sorted([c.name for c in self.CategoryEnum])\n cum_header = 'Overall' if self.is_binary else 'Total'\n headers = category_names + [cum_header]\n num_headers = len(headers)\n group_patches = pd.DataFrame(columns=headers)\n group_slides = pd.DataFrame(columns=headers)\n group_patients = pd.DataFrame(columns=headers)\n for chunk in groups['chunks']:\n try:\n group_name = group_names[chunk['id']]\n except (TypeError, KeyError):\n group_name = f\"Group {chunk['id'] + 1}\"\n patch_paths = chunk['imgs']\n patches = {name: set() for name in category_names}\n slides = {name: set() for name in category_names}\n patients = {name: set() for name in category_names}\n all_patches = set()\n all_slides = set()\n all_patients = set()\n patient_patches = pd.DataFrame(columns=headers)\n slide_patches = pd.DataFrame(columns=headers)\n patient_slides = pd.DataFrame(columns=headers)\n for patch_path in patch_paths:\n patch_id = utils.create_patch_id(patch_path, self.patch_pattern)\n label = utils.get_label_by_patch_id(patch_id, self.patch_pattern,\n self.CategoryEnum, is_binary=self.is_binary).name\n slide_name = utils.get_slide_by_patch_id(patch_id, self.patch_pattern)\n patient_id = utils.get_patient_by_slide_id(slide_name,\n dataset_origin=self.dataset_origin)\n\n patches[label].add(patch_id)\n\n if slide_name not in slides[label]:\n if patient_id not in patient_slides.index:\n patient_slides.loc[patient_id] = [0] * num_headers\n patient_slides.at[patient_id, label] += 1\n if slide_name not in all_slides:\n patient_slides.at[patient_id, cum_header] += 1\n \n slides[label].add(slide_name)\n patients[label].add(patient_id)\n\n if patient_id not in patient_patches.index:\n patient_patches.loc[patient_id] = [0] * num_headers\n patient_patches.at[patient_id, label] += 1\n patient_patches.at[patient_id, cum_header] += 1\n\n if slide_name not in slide_patches.index:\n 
slide_patches.loc[slide_name] = [0] * num_headers\n slide_patches.at[slide_name, label] += 1\n slide_patches.at[slide_name, cum_header] += 1\n\n all_patches.add(patch_id)\n all_slides.add(slide_name)\n all_patients.add(patient_id)\n\n for label, s in patches.items():\n group_patches.at[group_name, label] = len(s)\n group_patches.at[group_name, cum_header] = len(all_patches)\n for label, s in slides.items():\n group_slides.at[group_name, label] = len(s)\n group_slides.at[group_name, cum_header] = len(all_slides)\n for label, s in patients.items():\n group_patients.at[group_name, label] = len(s)\n group_patients.at[group_name, cum_header] = len(all_patients)\n\n patient_patches.loc[\"Total\"] = patient_patches.sum().astype(int)\n slide_patches.loc[\"Total\"] = slide_patches.sum().astype(int)\n patient_slides.loc[\"Total\"] = patient_slides.sum().astype(int)\n output['patient_patches'][group_name] = patient_patches\n output['slide_patches'][group_name] = slide_patches\n output['patient_slides'][group_name] = patient_slides\n \n group_patches.loc['Total'] = group_patches.sum().astype(int)\n group_slides.loc['Total'] = group_slides.sum().astype(int)\n group_patients.loc['Total'] = group_patients.sum().astype(int)\n output['group_patches'] = group_patches\n output['group_slides'] = group_slides\n output['group_patients'] = group_patients\n return output", "def simple_loadings_df(self, group_labels_file, subjid_pat=r'(?P<patid>[a-z]{2}_[0-9]{6})'):\n # make sure file exists\n if not os.path.exists(group_labels_file):\n raise FileNotFoundError('The file {} has not been found.'.format(group_labels_file))\n\n # make sure this object has been .fit()\n self._update()\n\n groups = self._parse_groups_file(group_labels_file=group_labels_file)\n patids = self._get_subject_ids(subjid_pat=subjid_pat)\n\n loads = self._load_loadings()\n\n # build the raw loadings table\n df = build_raw_loadings_table(loads, patids)\n df = add_groups_to_loadings_table(df, groups)\n return df", "def toDataFrame(self, split=True):\n\n def cleanColumns(df):\n # Cleanup columns\n colnames = df.columns\n colnames=[c.replace('\\'','') for c in colnames]\n colnames=[c[1:] if c.startswith('/') else c for c in colnames]\n # If there is only one group, we remove the group key\n groupNames = self.groupNames\n if len(groupNames)==1:\n nChar = len(groupNames[0])\n colnames=[c[nChar+1:] for c in colnames] # +1 for the \"/\"\n df.columns = colnames\n\n fh = self['data']\n if split:\n # --- One dataframe per group. 
We skip group that have empty data\n dfs={}\n for group in fh.groups():\n try:\n df = group.as_dataframe(time_index=True)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = group.as_dataframe(time_index=False)\n if len(df)>0:\n dfs[group.name] = df\n if len(dfs)==1:\n dfs=dfs[group.name]\n return dfs\n else:\n # --- One dataframe with all data\n try:\n df = fh.as_dataframe(time_index=True)\n cleanColumns(df)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = fh.as_dataframe(time_index=False)\n return df", "def load_groups(files):\n groups = defaultdict(list)\n for f in files:\n d = np.load(f, allow_pickle=True)\n gkey = to_group_key(d['args'].item()._get_kwargs())\n groups[gkey].append((f, d))\n return groups", "def build_groupings(idir: str) -> dict:\n bkg_group = {key: [ifile for ifile in glob(f'{idir}/*_{key}_*.root')] for key in bkgs}\n pw_group = {key: [ifile for ifile in glob(f'{idir}/{key}*.root')] for key in powhegs}\n wh_pw_group = [ifile for name in wh_powhegs for ifile in glob(f'{idir}/{name}*.root')]\n ungrouped = [ifile for ifile in glob(f'{idir}/*.root') if 'madgraph' in ifile or 'JHU' in ifile]\n\n group = {}\n for key, files in bkg_group.items():\n if len(files) > 0:\n group[key] = files\n\n for key, files in pw_group.items():\n if len(files) > 0:\n group[key] = files\n\n for ifile in ungrouped:\n name = ifile.split('/')[-1].replace('.root', '')\n name = name.split('_SYST')[0].replace('-', '_')\n name = name.replace('_ggH125', '').replace('_VBF125', '').replace('_WH125', '').replace('_ZH125', '')\n group[name] = [ifile]\n\n if len(wh_pw_group) > 0:\n group['wh125_powheg'] = wh_pw_group\n\n return group", "def get_data(self, df, latest_currency):\n file_paths = list(df[\"File\"])\n df = self.extract_df(file_paths[0])\n df = self.group_df(df)\n df = self.fill_league_currency(df, latest_currency)\n for file_path in file_paths[1:]:\n league = self.extract_df(file_path)\n league_grp = self.group_df(league)\n league_grp = self.fill_league_currency(league_grp, latest_currency)\n df = df.join(league_grp)\n df = df.reset_index(drop=True)\n return df", "def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))", "def _generate_datasets(self):\n datasets = list()\n for fname in sorted(os.listdir(self.base_dir)):\n if not self._filename_re.match(fname):\n continue\n\n file_path = os.path.join(self.base_dir, fname)\n try:\n fh = self._open_hdf5(file_path)\n\n except (IOError, OSError) as e:\n warnings.warn('Cannot access {}; skipped'.format(file_path))\n print(e)\n continue\n\n for key in fh:\n if self._groupname_re.match(key.lstrip('/')):\n datasets.append(ObjectTableWrapper(fh, key, self._schema))\n continue\n\n warn_msg = 'incorrect group name \"{}\" in {}; skipped this group'\n warnings.warn(warn_msg.format(os.path.basename(file_path), key))\n\n return datasets", "def get_datasets(h5group, prefix=''):\n for key in h5group.keys():\n h5obj = h5group[key]\n path = 
'{}/{}'.format(prefix, key)\n attrs = {att:val for att, val in h5obj.attrs.items()}\n\n if isinstance(h5obj, h5py.Dataset): \n \n # get metadata\n units = attrs[\"units\"] if 'units' in attrs else None\n spec = attrs[\"datatype\"] if 'datatype' in attrs else None\n \n # special handling for the nested waveform dataset\n if \"waveform/values/cumulative_length\" in path:\n nwfs = h5obj.shape[0]\n \n # must fix datatype AFTER this initial iteration\n yield (path, \"waveform\", nwfs, None, units, spec) \n elif \"waveform\" in path:\n pass\n \n # handle normal 'array<1>{real}' datasets\n else:\n yield (path, key, h5obj.shape[0], h5obj.dtype, units, spec) \n \n # test for group (go down)\n elif isinstance(h5obj, h5py.Group): \n yield from get_datasets(h5obj, path)", "def _split_by_filename(\n df: pd.DataFrame):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby('filename')\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]", "def collect_data(input_folder, ratio):\n # TODO implement ratio\n data = pd.DataFrame()\n\n folderpaths = [os.path.normpath((os.path.join(input_folder, x)))\n for x in os.listdir(input_folder) if not x.endswith('.gitkeep')]\n # for folder in folderpaths:\n for folder in folderpaths:\n filepaths = [os.path.normpath((os.path.join(folder, x)))\n for x in os.listdir(folder) if not x.endswith('.gitkeep')]\n for file in filepaths:\n df = pd.read_pickle(file)\n df = df[df['is_feas'] == 1]\n data = data.append(df[['frames', 'label']], ignore_index=True)\n\n return data.rename(columns={'frames': 'x', 'label': 'y'})", "def weighted_loadings_df(self, group_labels_file, subjid_pat=r'(?P<patid>[a-z]{2}_[0-9]{6})'):\n # make sure file exists\n if not os.path.exists(group_labels_file):\n raise FileNotFoundError('The file {} has not been found.'.format(group_labels_file))\n\n self._update()\n\n # let's first pick the simple version of the loadings\n df = self.simple_loadings_df(group_labels_file, subjid_pat=subjid_pat)\n blobs = get_largest_blobs(self._icc_imgs)\n\n masks = [apply_mask(ic_map, blob) for ic_map, blob in zip(self._icc_imgs, blobs)]\n\n blob_avgs = [mask.mean() for mask in masks]\n\n blob_signs = np.sign(blob_avgs)\n n_ics = len(blob_avgs)\n df[list(range(1, n_ics+1))] = df[list(range(1, n_ics+1))] * blob_signs\n return df", "def read_dfdict_data(datadir, subset=None):\n print('Reading datasets...')\n # Initialize dict to store all dataframes\n dfdict = {}\n\n # If subset of datasets are given, read only those\n if subset is not None:\n with open(subset, 'r') as f:\n datasetids = f.read().splitlines()\n else:\n datasetids = get_dataset_ids(datadir)\n\n # Read each dataset and convert to relative abundance\n for dataset in datasetids:\n print(dataset),\n ## Read dataset\n df, meta = read_dataset_files(dataset, datadir)\n df = raw2abun(df)\n\n ## Get case and control samples\n classes_list = get_classes(meta)\n if len(classes_list[0]) == 0 or len(classes_list[1]) == 0:\n raise ValueError('Something wrong with ' + dataset + ' metadata.')\n H_smpls, dis_smpls = get_samples(meta, classes_list)\n\n dfdict.update({dataset: {'df': df, 'meta': meta, 'dis_smpls': dis_smpls, 'H_smpls': H_smpls, 'classes': classes_list}})\n print('\\nReading datasets... 
Finished.')\n return dfdict", "def collect2dict(filenames, outdir):\n \n tbldict = {}\n for fn in filenames:\n try:\n path = max(glob.glob(outdir+fn+'*.pkl'), key=os.path.getctime)\n out = pd.read_pickle(path)\n tbldict[fn] = out\n except ValueError:\n print(fn + ' not found in ' + outdir)\n return tbldict", "def defineFileGroups(self, mergeableFiles):\n fileGroups = {}\n foundFiles = []\n\n for mergeableFile in mergeableFiles:\n if mergeableFile[\"file_lfn\"] not in foundFiles:\n foundFiles.append(mergeableFile[\"file_lfn\"])\n else:\n continue\n\n if mergeableFile[\"pnn\"] not in fileGroups:\n if self.mergeAcrossRuns:\n fileGroups[mergeableFile[\"pnn\"]] = []\n else:\n fileGroups[mergeableFile[\"pnn\"]] = {}\n\n if self.mergeAcrossRuns:\n fileGroups[mergeableFile[\"pnn\"]].append(mergeableFile)\n else:\n if mergeableFile[\"file_run\"] not in fileGroups[mergeableFile[\"pnn\"]]:\n fileGroups[mergeableFile[\"pnn\"]][mergeableFile[\"file_run\"]] = []\n fileGroups[mergeableFile[\"pnn\"]][mergeableFile[\"file_run\"]].append(mergeableFile)\n\n return fileGroups", "def create_data_frame(self):\n column_names = Annotations.create_columns(self.headers, self.annot_types)\n dtypes = Annotations.get_dtypes_for_group_annots(self.headers, self.annot_types)\n df = self.open_file(\n self.file_path,\n open_as=\"dataframe\",\n # Coerce values in group annotations\n converters=dtypes,\n # Header/column names\n names=self.headers,\n # Prevent pandas from reading first 2 lines in file\n # since they're passed in with param 'names'\n skiprows=2,\n )[0]\n self.file = Annotations.convert_header_to_multi_index(df, column_names)", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif folders.find(\"pass\") != -1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list", "def load_groupfile(infile_path):\n name, ext = 
os.path.splitext(os.path.basename(infile_path))\n if ext == '.tsv':\n df = pd.read_table(infile_path, header=0)\n elif ext == '.csv':\n df = pd.read_csv(infile_path, header=0)\n else:\n raise ValueError(\"File type not supported: \" + ext)\n\n return df", "def convert_subtree(group: h5py.Group, refs: h5py.Group):\n d = {}\n for key in group:\n if key == \"#refs#\":\n continue\n value = group[key]\n if isinstance(value, h5py.Group):\n d[key] = convert_subtree(value, refs=refs)\n elif isinstance(value, h5py.Dataset):\n d[key] = convert_dataset(value, refs=refs)\n else:\n raise ValueError(f\"Can't convert {value} of type {type(value)}.\")\n return d", "def data_frame_creator(self):\n\n rgb_dir = [\n self.dataset_address + sequence_f + rgb_f\n for rgb_f in self.rgb_folder for sequence_f in self.sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_address + sequence_f + depth_f\n for depth_f in self.depth_folder\n for sequence_f in self.sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_address + sequence_f + segmentation_f\n for segmentation_f in self.segmentation_folder\n for sequence_f in self.sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1)\n\n return pd.DataFrame(dataset)", "def load_data(self, directory, group):\n \n em_images = h5py.File(os.path.join(directory, \"image.h5\"), 'r')\n segmentations = h5py.File(os.path.join(directory, \"human_labels_split.h5\"), 'r')\n\n if group == 'train':\n return em_images['main'][:192], segmentations['main'][:192]\n elif group == 'dev':\n return em_images['main'][192:], segmentations['main'][192:]\n \"\"\"\n em_images = h5py.File(os.path.join(directory, \"voronoi_boundary.h5\"), 'r')\n segmentations = h5py.File(os.path.join(directory, \"voronoi_segmentation.h5\"), 'r')\n\n if group == 'train':\n return em_images['main'][:16], segmentations['main'][:16]\n elif group == 'dev':\n return em_images['main'][16:], segmentations['main'][16:]\n \"\"\"", "def process_data_group(folder:Path, type:str, light:bool = False) -> dict:\n\n if type == dm.Delivery:\n data_folder = folder / 'data'\n else:\n data_folder = folder\n\n # check for non-existent or empty folder\n if not data_folder.exists():\n raise FileNotFoundError\n try:\n next((data_folder).glob(\"**/*\"))\n except StopIteration:\n # folder is empty can't process it\n raise FileNotFoundError\n\n # Get file sizes, last modified dates, and names to count,\n # sum size, and hash the file data provided\n file_sizes, file_modified_dates, file_metamodified_dates, file_names = zip(\n *[\n (f.stat().st_size, f.stat().st_mtime, f.stat().st_ctime, f)\n for f in (data_folder).glob(\"**/*\")\n if f.is_file() and f.name != 'receipt.rst'\n ]\n )\n\n last_modified = datetime.fromtimestamp(\n max(max(file_modified_dates),\n max(file_metamodified_dates)))\n\n # Hash the files in the delivery\n if light:\n folder_hash = 'skipped'\n else:\n folder_hash = hash_files(file_names)\n\n dg = {\n 'name' : folder.name,\n 'type' : type.__name__,\n 'last_update' : datetime.now(),\n 'size' : sum(file_sizes),\n 'num_files' : len(file_sizes),\n 'group_hash' : 
folder_hash,\n 'group_last_modified' : last_modified,\n }\n\n return dg", "def load_all(self, root_dir, file_list=None, pattern=None):\n # each file name corresponds to another date. Also tools (A, B) and others.\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(WeldData.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(WeldData.load_single(path) for path in input_paths)\n\n return all_df", "def _merge_records(group: List[ParseResult]):\n\n # Group the file list and parsers\n group_files = list(set(sum([tuple(x.group) for x in group], ())))\n group_parsers = '-'.join(sorted(set(sum([[x.parser] for x in group], []))))\n\n # Merge the metadata\n is_list = [isinstance(x.metadata, list) for x in group]\n if sum(is_list) > 1:\n raise NotImplementedError('We have not defined how to merge >1 list-type data')\n elif sum(is_list) == 1:\n list_data = group[is_list.index(True)].metadata\n if len(is_list) > 1:\n other_metadata = reduce(_merge_func,\n [x.metadata for x, t in zip(group, is_list) if not t])\n group_metadata = [_merge_func(x, other_metadata) for x in list_data]\n else:\n group_metadata = list_data\n else:\n group_metadata = reduce(_merge_func, [x.metadata for x in group])\n return ParseResult(group_files, group_parsers, group_metadata)", "def _merge_files(parse_results: Iterable[ParseResult]) -> Iterable[ParseResult]:\n return map(_merge_records, groupby_file(parse_results))", "def data_frame_creator(self):\n sequence_folder = [\n '/SEQ1', '/SEQ2', '/SEQ3', '/SEQ4', '/SEQ5', '/SEQ6'\n ]\n rgb_folder = ['/RGBLeft/', '/RGBRight/']\n depth_folder = ['/DepthLeft/', '/DepthRight/']\n segmentation_folder = ['/GTLeft/', '/GTright/']\n rgb_dir = [\n self.dataset_dir + sequence_f + rgb_f for rgb_f in rgb_folder\n for sequence_f in sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_dir + sequence_f + depth_f\n for depth_f in depth_folder\n for sequence_f in sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_dir + sequence_f + segmentation_f\n for segmentation_f in segmentation_folder\n for sequence_f in sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1, 
random_state=123)\n\n return pd.DataFrame(dataset)", "def create_dict_with_all_df(path: str) -> Dict[str, pd.DataFrame]:\n all_zips = glob.glob(os.path.join(path, \"*.zip\"))\n df_dict = {}\n for zip_ in all_zips:\n dfs = _return_dfs_from_zipfolder(zip_)\n # df_dict |= dfs # will work with Python 3.9\n df_dict = {**df_dict, **dfs}\n return df_dict", "def load_all(self, root_dir, file_list=None, pattern=None):\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(PMUData.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(PMUData.load_single(path) for path in input_paths)\n\n return all_df", "def load_all(self, root_dir, file_list=None, pattern=None):\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.ts')]\n if len(input_paths) == 0:\n raise Exception(\"No .ts files found using pattern: '{}'\".format(pattern))\n\n all_df, labels_df = self.load_single(input_paths[0]) # a single file contains dataset\n\n return all_df, labels_df", "def loadObjects(basePath, snapNum, gName, nName, fields):\n result = {}\n\n # make sure fields is not a single element\n if isinstance(fields, six.string_types):\n fields = [fields]\n\n # load header from first chunk\n with h5py.File(gcPath(basePath, snapNum), 'r') as f:\n\n header = dict(f['Header'].attrs.items())\n result['count'] = f['Header'].attrs['N' + nName + '_Total']\n\n if not result['count']:\n print('warning: zero groups, empty return (snap=' + str(snapNum) + ').')\n return result\n\n # if fields not specified, load everything\n if not fields:\n fields = list(f[gName].keys())\n\n for field in fields:\n # verify existence\n if field not in f[gName].keys():\n raise Exception(\"Group catalog does not have requested field [\" + field + \"]!\")\n\n # replace local length with global\n shape = list(f[gName][field].shape)\n shape[0] = result['count']\n\n # allocate within return dict\n result[field] = np.zeros(shape, dtype=f[gName][field].dtype)\n\n # loop over chunks\n wOffset = 0\n\n for i in range(header['NumFiles']):\n f = h5py.File(gcPath(basePath, snapNum, i), 'r')\n\n if not 
f['Header'].attrs['N'+nName+'_ThisFile']:\n continue # empty file chunk\n\n # loop over each requested field\n for field in fields:\n if field not in f[gName].keys():\n raise Exception(\"Group catalog does not have requested field [\" + field + \"]!\")\n\n # shape and type\n shape = f[gName][field].shape\n\n # read data local to the current file\n if len(shape) == 1:\n result[field][wOffset:wOffset+shape[0]] = f[gName][field][0:shape[0]]\n else:\n result[field][wOffset:wOffset+shape[0], :] = f[gName][field][0:shape[0], :]\n\n wOffset += shape[0]\n f.close()\n\n # only a single field? then return the array instead of a single item dict\n if len(fields) == 1:\n return result[fields[0]]\n\n return result", "def get_data(directory):\n dictlist = []\n cols = ['title', 'text', 'authors', 'num_images', 'domain', 'url']\n\n folders = glob.glob(directory + '/*')\n for index, subdir in enumerate(folders):\n\n file_path = glob.glob(subdir + '/*')\n\n #check if glob returned a valid file path (non-empty list)\n if len(file_path) == 1:\n file = open(file_path[0]).read()\n jsondata = json.loads(file)\n dictlist.append(scaledict(jsondata))\n return pd.DataFrame(dictlist, columns=cols)", "def generate_submission_datafiles_data(submission_id=str()):\n\n columns = list()\n data_set = list()\n\n columns.append(dict(className='summary-details-control detail-hover-message', orderable=False, data=None,\n title='', defaultContent='', width=\"5%\"))\n columns.append(dict(data=\"record_id\", visible=False))\n columns.append(dict(data=\"name\", title=\"Name\"))\n\n try:\n submission_record = Submission().get_record(submission_id)\n except:\n return dict(dataSet=data_set,\n columns=columns\n )\n\n submission_record = Submission().get_record(submission_id)\n bundle = submission_record.get(\"bundle\", list())\n datafile_object_list = [ObjectId(datafile_id) for datafile_id in bundle]\n\n projection = dict(name=1)\n filter_by = dict()\n filter_by[\"_id\"] = {'$in': datafile_object_list}\n\n records = DataFile().get_all_records_columns(sort_by='date_created', sort_direction=1, projection=projection,\n filter_by=filter_by)\n\n if len(records):\n df = pd.DataFrame(records)\n df['s_n'] = df.index\n\n df['record_id'] = df._id.astype(str)\n df[\"DT_RowId\"] = df.record_id\n df.DT_RowId = 'row_' + df.DT_RowId\n df = df.drop('_id', axis='columns')\n\n data_set = df.to_dict('records')\n\n return dict(dataSet=data_set,\n columns=columns\n )", "def h5_to_dict(grp, **kwargs):\n data = {}\n for key in grp.keys():\n try:\n e_key = eval(key, {})\n except:\n e_key = key\n\n data[e_key] = h5_to_data(grp[key], **kwargs)\n \n return data", "def get_dfs_from_fred(self):\n\t\tdict_df = {}\n\t\tif self.data_array is not None:\n\t\t\tfred_api_calls = {\n\t\t\t'Real GDP':'GDPC1', 'Federal Funds Rate':'FEDFUNDS',\n\t\t\t'10 YR Treasury':'DGS10', 'S&P500':'SP500', 'Moody Baa Yield':'BAA',\n\t\t\t'Moody Aaa Yield':'AAA', '30 Year Mortgage':'MORTGAGE30US',\n\t\t\t'Debt Percent GDP':'GFDEGDQ188S'\n\t\t\t}\n\t\t\tfor data in self.data_array:\n\t\t\t\tapi_call = fred_api_calls[data]\n\t\t\t\tdict_df[data] = self.fred.series.observations(api_call).set_index('date')\n\t\t\treturn dict_df\n\t\tif self.test_dict is not None:\n\t\t\tpath = self.test_dict['path']\n\t\t\tfor data in self.test_dict['data_array']:\n\t\t\t\tdf_path = os.path.join(path, data)\n\t\t\t\tdict_df[data] = pd.read_csv(df_path, index_col='date')\n\t\t\treturn dict_df", "def load_data(data_dir=DATA_DIR, data_ext=DATA_EXT):\n data = {}\n for f in os.listdir(data_dir):\n if 
f.endswith(data_ext):\n name = f.split('.')[0]\n df = pd.read_csv(f)\n data[name] = df\n return data", "def load_all(self, root_dir, file_list=None, pattern=None, mode=None):\n\n # if func is None:\n # func = SemicondTraceData.load_single\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if (mode != 'simple') and (self.n_proc > 1):\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n # done like this because multiprocessing needs the *explicit* function call\n # and not a reference to a function, e.g. func = pd.read_csv\n all_df = pd.concat(pool.map(SemicondTraceData.load_single, input_paths))\n else: # read 1 file at a time\n if mode == 'simple':\n all_df = pd.concat(pd.read_csv(path) for path in tqdm(input_paths))\n else:\n all_df = pd.concat(SemicondTraceData.load_single(path) for path in tqdm(input_paths))\n\n return all_df", "def generateDataFrame(self):\n labelArray = []\n\n # At this level ignored files are excluded\n for item in self.added:\n self.folderTree.append(item)\n\n for item in self.folderTree:\n if item in self.modified:\n labelArray.append('modified')\n elif item in self.deleted:\n labelArray.append('deleted')\n elif item in self.ignored:\n labelArray.append('ignored')\n elif item in self.added:\n labelArray.append('added')\n else:\n labelArray.append('baseFile')\n\n df = pd.DataFrame(list(zip(self.folderTree, labelArray)), \\\n columns=['File', 'Type'])\n self.fileDataFrame = df", "def extract_fea_for_datagroup(self, data_group, mode='train'):\n if mode == 'train':\n fp = open(self.train_path, 'r')\n else:\n fp = open(self.test_path, 'r')\n\n for i, line in tqdm(enumerate(fp)):\n audio_name, label = line.split()\n audio_path = os.path.join(self.dev_path, audio_name)\n fea = self.extract_logmel(wav_path=audio_path)\n\n wav_name = os.path.basename(audio_path)\n data_group[wav_name] = fea\n data_group[wav_name].attrs['label'] = label\n data_group[wav_name].attrs['venue'] = wav_name.split('-')[1]\n data_group[wav_name].attrs['device'] = wav_name.split('-')[4][0]\n # label could be extracted by :data_group[u'airport-barcelona-0-0-a.wav'].attrs['label']", "def merge_dfs(files, convert_numeric=False):\n\n dfs = []\n for f in files:\n df = getdf(f, convert_numeric=convert_numeric)\n dfs.append(df)\n # dfs = pd.concat(dfs, join='outer', axis=1)\n return dfs", "def looper(path2mdbs, tablename, csv=False):\n containing_folder = path2mdbs\n contained_files = os.listdir(containing_folder)\n df_dictionary={}\n\n count = 1\n basestring = 'file_'\n for i in contained_files:\n if os.path.splitext(os.path.join(containing_folder,i))[1]=='.mdb' or os.path.splitext(os.path.join(containing_folder,i))[1]=='.accdb':\n countup = basestring+str(count)\n # df 
creation/manipulation starts here\n print(i)\n df = main_translate(tablename,os.path.join(containing_folder,i))\n if df is not None:\n if 'DateLoadedInDB' in df.columns:\n df['DateLoadedInDB']=df['DateLoadedInDB'].astype('datetime64')\n df['DateLoadedInDB'] = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n else:\n df['DateLoadedInDB'] = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n\n df['DBKey'] = os.path.split(os.path.splitext(i)[0])[1].replace(\" \",\"\")\n # df add to dictionary list\n df_dictionary[countup] = df.copy()\n else:\n pass\n count+=1\n final_df = pd.concat([j for i,j in df_dictionary.items()], ignore_index=True).drop_duplicates()\n\n return final_df if csv==False else final_df.to_csv(os.path.join(containing_folder,tablename+'.csv'))", "def group_data():\n\n # Merge on Departure.\n\n # Merge on Arrival.\n\n data = pd.read_csv(path + \"/data/public/public_train.csv\")[[\"DateOfDeparture\", \"Arrival\"]]\n data['DateOfDeparture'] = pd.to_datetime(data['DateOfDeparture'])\n\n arrival = join_cleaned_data().\\\n rename(columns={'Date': 'DateOfDeparture', 'Airport': 'Arrival'}).\\\n set_index(\"DateOfDeparture\")\n\n merged_arrv = pd.merge(data, arrival, on=[\"DateOfDeparture\", \"Arrival\"], how=\"left\")\n\n # Rename and drop columns.\n\n merged_arrv.columns = [c + \"_Arrival\" if c not in [\"DateOfDeparture\",\n \"DateOfArrival\",\n \"Arrival\",\n \"WeeksToDeparture\"]\n else c\n for c in merged_arrv.columns]\n print merged_arrv\n merged_arrv = merged_arrv.drop([\"Arrival\"], axis=1)\n\n # Concatenate the two fields.\n # merged_all = pd.concat([merged_arrv, merged_dept], axis=1)\n\n merged_all = merged_arrv.\\\n convert_objects(convert_numeric=True)\n merged_all.to_csv(path + \"/Submission/temperatures.csv\")", "def merge_dfs(raw_dfs_dict: Dict[str, pd.DataFrame]) -> \\\n (pd.DataFrame, Dict[str, pd.DataFrame]):\n\n # List out the dfs available, and make copies of all dfs\n raw_dfs_list = raw_dfs_dict.keys()\n dfs_dict = {}\n for df in raw_dfs_list:\n dfs_dict[df] = raw_dfs_dict[df].copy()\n\n # Run the custom cleaning functions on the dataframes that need them\n dfs_dict['googletrend.csv'] = \\\n clean_googletrend_csv(dfs_dict['googletrend.csv'])\n dfs_dict['store.csv'] = \\\n clean_store_csv(dfs_dict['store.csv'])\n dfs_dict['weather.csv'] = \\\n clean_weather_csv(dfs_dict['weather.csv'])\n\n # Run generic 'clean_other_dfs' function on the other dataframes\n dfs_dict['state_names.csv'] = \\\n clean_other_dfs(dfs_dict['state_names.csv'])\n dfs_dict['store_states.csv'] = \\\n clean_other_dfs(dfs_dict['store_states.csv'])\n dfs_dict['train.csv'] = \\\n clean_other_dfs(dfs_dict['train.csv'])\n\n # Start by merging store_states and state_names\n df = dfs_dict['store_states.csv'].merge(dfs_dict['state_names.csv'],\n on='state')\n # Add in weather\n df = df.merge(dfs_dict['weather.csv'],\n left_on='state_name', right_on='file')\n\n # Drop file and state_name - they are colinear with 'state'\n df.drop(['file', 'state_name'], axis='columns', inplace=True)\n\n # Add in store\n df = df.merge(dfs_dict['store.csv'], on='store')\n\n # Add in train - note that since train.csv has some missing dates, where\n # the store was apparently closed, we use 'outer' to capture all the dates\n df = df.merge(dfs_dict['train.csv'], on=['date', 'store'], how='outer')\n\n # Add in googletrend, making sure to coerce 'date' to datetime first\n df['date'] = pd.to_datetime(df['date'])\n df = df.merge(dfs_dict['googletrend.csv'], on=['date', 'state'])\n\n # final cleanup\n df.loc[df.open.isnull(), 'open'] = 
0\n df.loc[df.sales.isnull(), 'sales'] = 0\n df.loc[df.customers.isnull(), 'customers'] = 0\n df.loc[df.promo.isnull(), 'promo'] = 0\n df.loc[df.school_holiday.isnull(), 'school_holiday'] = 0\n df.loc[df.state_holiday.isnull(), 'state_holiday'] = '0'\n df['day_of_week'] = df.date.dt.dayofweek\n df.loc[df.customers == 0, 'open'] = 0\n df['date'] = pd.to_datetime(df['date'])\n df['week_start'] = pd.to_datetime(df['week_start'])\n df.loc[df.open == 0, 'promo'] = 0\n\n new_dict = {}\n for k, v in dfs_dict.items():\n new_dict[k] = v\n return (df, new_dict)", "def _get_files(\n self,\n data_root,\n data_subset=\"full/*0\",\n signal_subset=\"*\",\n noise_subset=\"*\",\n data_type=\"raw\",\n noise_type=\"stationary\",\n noise_type_sim=None,\n mask_type=\"hitsmask_tailored\",\n signal_type=\"r0p03\",\n signal_type_sim=None,\n signal_transfer_type=None,\n suffix=\"\",\n foreground_type_sim=None,\n template_type=None,\n sub_planck=False,\n ):\n\n if signal_transfer_type is None:\n signal_transfer_type = signal_type\n\n # regularize data root\n if not os.path.exists(data_root):\n raise OSError(\"Missing data root {}\".format(data_root))\n\n # find all map files\n map_root = os.path.join(data_root, \"data_{}\".format(data_type))\n map_files = []\n data_subset = data_subset.split(\",\")\n for f in np.atleast_1d(data_subset):\n files = glob.glob(os.path.join(map_root, \"{}.fits\".format(f)))\n if not len(files):\n raise OSError(\"Missing files in data subset {}\".format(f))\n map_files.extend(files)\n data_subset = \",\".join(data_subset)\n map_files = sorted(map_files)\n map_files = [f for f in map_files if os.path.basename(f).startswith(\"map_\")]\n map_tags = [\n os.path.splitext(os.path.basename(f))[0].split(\"_\", 1)[1] for f in map_files\n ]\n map_freqs = []\n for t in map_tags:\n # if map tag is not a plain frequency, extract plain frequency\n map_freqs.append(self.dict_freqs[t])\n self.log(\"Found {} map files in {}\".format(len(map_files), map_root), \"info\")\n self.log(\"Map files: {}\".format(map_files), \"debug\")\n self.log(\"Map freqs: {}\".format(map_freqs), \"debug\")\n\n raw_root = None\n raw_files = None\n # find all corresponding signal sims\n signal_root = os.path.join(data_root, \"signal_{}\".format(signal_type))\n num_signal = None\n signal_files = []\n for f in map_files:\n sfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_root).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(sfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if num_signal is None:\n num_signal = nsims1\n else:\n if nsims1 != num_signal:\n raise OSError(\n \"Found {} signal sims for map {}, expected {}\".format(\n nsims1, f, num_signal\n )\n )\n num_signal = min(num_signal, nsims1)\n signal_files.append(sfiles)\n signal_files = np.asarray([x[:num_signal] for x in signal_files])\n self.log(\"Found {} signal sims in {}\".format(num_signal, signal_root), \"info\")\n self.log(\n \"First signal sim files: {}\".format(signal_files[:, 0].tolist()), \"debug\"\n )\n\n # find all corresponding signal transfer function sims\n signal_transfer_root = os.path.join(\n data_root, \"signal_{}\".format(signal_transfer_type)\n )\n num_signal_transfer = None\n signal_transfer_files = []\n for f in map_files:\n sfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_transfer_root).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(sfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if 
num_signal_transfer is None:\n num_signal_transfer = nsims1\n else:\n if nsims1 != num_signal_transfer:\n raise OSError(\n \"Found {} signal_transfer sims for map {}, expected {}\".format(\n nsims1, f, num_signal_transfer\n )\n )\n num_signal_transfer = min(num_signal_transfer, nsims1)\n signal_transfer_files.append(sfiles)\n signal_transfer_files = np.asarray(\n [x[:num_signal_transfer] for x in signal_transfer_files]\n )\n self.log(\n \"Found {} signal transfer sims in {}\".format(\n num_signal_transfer, signal_transfer_root\n ),\n \"info\",\n )\n self.log(\n \"First signal transfer sim files: {}\".format(\n signal_transfer_files[:, 0].tolist()\n ),\n \"debug\",\n )\n\n # find all corresponding noise sims\n if noise_type is not None:\n noise_root = os.path.join(data_root, \"noise_{}\".format(noise_type))\n num_noise = None\n noise_files = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, noise_root).replace(\n \".fits\", \"_{}.fits\".format(noise_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing noise sims for {}\".format(f))\n if num_noise is None:\n num_noise = nsims1\n else:\n if nsims1 != num_noise:\n raise OSError(\n \"Found {} noise sims for map {}, expected {}\".format(\n nsims1, f, num_noise\n )\n )\n num_noise = min(num_noise, nsims1)\n noise_files.append(nfiles)\n noise_files = np.asarray([x[:num_noise] for x in noise_files])\n self.log(\"Found {} noise sims in {}\".format(num_noise, noise_root), \"info\")\n self.log(\n \"First noise sim files: {}\".format(noise_files[:, 0].tolist()), \"debug\"\n )\n else:\n noise_root = None\n noise_files = None\n\n # find all corresponding noise sims for sim_index run\n if noise_type_sim is not None:\n noise_root_sim = os.path.join(data_root, \"noise_{}\".format(noise_type_sim))\n num_noise_sim = None\n noise_files_sim = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, noise_root_sim).replace(\n \".fits\", \"_{}.fits\".format(noise_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing noise sims for {}\".format(f))\n if num_noise_sim is None:\n num_noise_sim = nsims1\n else:\n if nsims1 != num_noise_sim:\n raise OSError(\n \"Found {} noise sims for map {}, expected {}\".format(\n nsims1, f, num_noise_sim\n )\n )\n num_noise_sim = min(num_noise_sim, nsims1)\n noise_files_sim.append(nfiles)\n noise_files_sim = np.asarray(noise_files_sim)\n self.log(\n \"Found {} noise sims in {}\".format(num_noise_sim, noise_root_sim),\n \"info\",\n )\n self.log(\n \"First noise sim files: {}\".format(noise_files_sim[:, 0].tolist()),\n \"debug\",\n )\n else:\n noise_root_sim = noise_root\n noise_files_sim = noise_files\n\n # find all corresponding signal sims for sim_index run\n if signal_type_sim is not None:\n signal_root_sim = os.path.join(\n data_root, \"signal_{}\".format(signal_type_sim)\n )\n num_signal_sim = None\n signal_files_sim = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_root_sim).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if num_signal_sim is None:\n num_signal_sim = nsims1\n else:\n if nsims1 != num_signal_sim:\n raise OSError(\n \"Found {} signal sims for map {}, expected {}\".format(\n nsims1, f, num_signal_sim\n )\n )\n num_signal_sim = min(num_signal_sim, nsims1)\n signal_files_sim.append(nfiles)\n signal_files_sim = np.asarray(signal_files_sim)\n 
self.log(\n \"Found {} signal sims in {}\".format(num_signal_sim, signal_root_sim),\n \"info\",\n )\n self.log(\n \"First signal sim files: {}\".format(signal_files_sim[:, 0].tolist()),\n \"debug\",\n )\n else:\n signal_root_sim = signal_root\n signal_files_sim = signal_files\n\n # find all corresponding foreground sims for sim_index run\n if foreground_type_sim is not None:\n foreground_root = os.path.join(\n data_root, \"foreground_{}\".format(foreground_type_sim)\n )\n num_foreground_sim = None\n foreground_files = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, foreground_root).replace(\".fits\", \"_*.fits\")\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing foreground sims for {}\".format(f))\n if num_foreground_sim is None:\n num_foreground_sim = nsims1\n else:\n if nsims1 != num_foreground_sim:\n raise OSError(\n \"Found {} foreground sims for map {}, expected {}\".format(\n nsims1, f, num_foreground_sim\n )\n )\n num_foreground_sim = min(num_foreground_sim, nsims1)\n foreground_files.append(nfiles)\n foreground_files = np.asarray(\n [x[:num_foreground_sim] for x in foreground_files]\n )\n self.log(\n \"Found {} foreground sims in {}\".format(\n num_foreground_sim, foreground_root\n ),\n \"info\",\n )\n self.log(\n \"First foreground sim files: {}\".format(\n foreground_files[:, 0].tolist()\n ),\n \"debug\",\n )\n else:\n foreground_root = None\n foreground_files = None\n\n # find all corresponding masks\n if mask_type is None:\n raise ValueError(\"Argument mask_type required\")\n # If mask is a fits file, use the same mask for all maps\n if os.path.splitext(mask_type)[1] == \".fits\":\n if os.path.exists(mask_type):\n # it's an absolute path\n mask_files = np.tile(mask_type, len(map_tags))\n mask_root = os.path.dirname(mask_type)\n else:\n # it's relative to base directory structure\n mask_files = np.tile(os.path.join(data_root, mask_type), len(map_tags))\n mask_root = os.path.dirname(os.path.join(data_root, mask_type))\n else:\n mask_root = os.path.join(data_root, \"masks_{}\".format(mask_type))\n # XXX Do this smarter\n mask_files = [\n os.path.join(mask_root, \"mask_map_{}.fits\".format(tag))\n for tag in map_tags\n ]\n for f in mask_files:\n if not os.path.exists(f):\n raise OSError(\"Missing mask file {}\".format(f))\n self.log(\"Found {} masks in {}\".format(len(mask_files), mask_root), \"info\")\n self.log(\"Mask files: {}\".format(mask_files), \"debug\")\n\n # Also need a list of unique map tags for populating dictionaries\n # in data structures\n map_tags_orig = list(map_tags) # copy\n map_tags = pt.unique_tags(map_tags)\n\n # make a list of names corresponding to the order of the cross spectra\n map_pairs = pt.tag_pairs(map_tags)\n map_pairs_orig = pt.tag_pairs(map_tags, index=map_tags_orig)\n\n # make a dictionary of map freqs for each unique map tag\n map_freqs_dict = {}\n for im0, m0 in enumerate(map_tags):\n map_freqs_dict[m0] = map_freqs[im0]\n map_freqs = map_freqs_dict\n\n fields = [\n \"data_root\",\n \"data_subset\",\n \"map_root\",\n \"map_files\",\n \"map_tags\",\n \"map_pairs\",\n \"map_tags_orig\",\n \"map_pairs_orig\",\n \"map_freqs\",\n \"raw_root\",\n \"raw_files\",\n \"signal_root\",\n \"signal_files\",\n \"signal_root_sim\",\n \"signal_files_sim\",\n \"signal_transfer_root\",\n \"signal_transfer_files\",\n \"noise_root\",\n \"noise_files\",\n \"noise_root_sim\",\n \"noise_files_sim\",\n \"mask_root\",\n \"mask_files\",\n \"foreground_root\",\n \"foreground_files\",\n ]\n out = dict()\n local = 
locals()\n for f in fields:\n out[f + suffix] = local[f]\n return out", "def prepare_dataset() -> Tuple[pd.DataFrame, Dict]:\n\n data_dir = Path.cwd()/\"freiburg_grocery_images\"\n labels = [directory.name for directory in data_dir.iterdir()]\n label_map = {label: i for i, label in enumerate(labels)}\n\n all_items = [str(file) for label in labels for file in (data_dir/label).iterdir()]\n labels_of_items = [label for label in labels for file in (data_dir/label).iterdir()]\n\n df = pd.DataFrame({\"Image\": all_items, \"Label\": labels_of_items})\n return df, label_map", "def CollectDatasets(redirector_str):\n \n \n # uploadDir = 'srv/' for lpcjobqueue shell or TTbarAllHadUproot/ for coffea casa and WinterFell\n \n if 'cmsxrootd' in redirector_str:\n uploadDir = 'srv'\n else:\n uploadDir = 'TTbarAllHadUproot'\n \n uploadDir = ''\n \n filedir = 'nanoAODv9Files/'\n Years = ['UL16', 'UL17', 'UL18']\n VFP = ['preVFP', 'postVFP'] # preVFP unavailable in Winterfell for the moment\n # VFP = ['postVFP'] # Only for simple test in WinterFell\n filesets = {} # To be filled and returned by this function\n \n # ---- Before concatenation with +=, lists should be declard ---- # \n \n for y in Years:\n if '16' in y:\n for v in VFP:\n filesets[y+v+'_QCD'] = []\n filesets[y+v+'_TTbar_700_1000'] = []\n filesets[y+v+'_TTbar_1000_Inf'] = []\n # ---- JetHT and SingleMu ---- #\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'preVFP_JetHT'+l+'_Data'] = []\n filesets[y+'preVFP_SingleMu'+l+'_Data'] = []\n for l in ['', 'F', 'G', 'H']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n \n elif '17' in y:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n else:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'A', 'B', 'C', 'D']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n # ---- Loop through years and VFP status, filling the filesets dictionary with the MC file locations from corresponding txt files ---- #\n \n for y in Years:\n if '16' in y:\n for v in VFP:\n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n \n # ---- TTbar ---- #\n ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar700to1000filename) as f:\n ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar1000toInffilename) as f:\n ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n if 'pre' in v:\n if 'Run2016' in filename: #preVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016 \n elif 'post' in v:\n if 'Run2016' 
in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' not in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016\n \n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n# # ---- RS KK Gluon ---- #\n# ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n# ulRSGluonfiles=[]\n# l=0\n# for i in range(1000, 5500, 500):\n# with open(ulRSGluonfilename) as f:\n# ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n# filesets[y+v+'_RSGluon'+str(i)] += ulRSGluonfiles[l]\n# l += 1\n \n else: # UL17 and UL18\n v = VFP[1] # No preVFP after 2016 Run vertex problem was fixed\n \n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n\n# # ---- TTbar ---- #\n# ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar700to1000filename) as f:\n# ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar1000toInffilename) as f:\n# ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n# filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist: \n if 'Run2017' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2017\n elif 'Run2018' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2018 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2018\n\n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n # ---- RS KK Gluon ---- #\n ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n ulRSGluonfiles=[]\n l=0\n for i in range(1000, 5500, 500):\n with open(ulRSGluonfilename) as f:\n ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_RSGluon'+str(i)] = ulRSGluonfiles[l]\n l += 1\n \n \n # ---- JetHT Eras---- #\n \n 
datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'JetHT/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'JetHT/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_JetHTF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_JetHTF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'JetHT/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'JetHT/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'JetHT/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'JetHT/' + 
filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTD_Data'] += jetdatafiles2018d\n \n\n \n # ---- Single Muon ---- #\n datafilelist = os.listdir(filedir + 'SingleMu/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'SingleMu/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'SingleMu/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_SingleMuF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_SingleMuF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'SingleMu/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'SingleMu/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in 
filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'SingleMu/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'SingleMu/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuD_Data'] += jetdatafiles2018d\n \n \n # print(filesets['UL16postVFP_JetHT_Data'])\n # print('==========================================================================================================')\n # print(filesets['UL16postVFP_TTbar'])\n \n return filesets", "def add_group_data(self, group_name):\n self.sorted = False\n self.grouped = False\n self.labels_to_add = []\n for path in self.all_groups.get(group_name):\n io = NWBHDF5IO(path, 'r')\n nwb_file = io.read()\n # self.labels.append(nwb_file.identifier)\n self.nwb_path_list.update({nwb_file.identifier: path})\n self.labels_to_add.append(nwb_file.identifier)\n self.musketeers_widget.session_widget.populate(self.labels_to_add, 'add')\n self.musketeers_widget.session_widget.update_text_filter()\n self.groupMenu.setEnabled(True)\n self.sortMenu.setEnabled(True)", "def _return_dfs_from_zipfolder(zip_path: str) -> Dict[str, pd.DataFrame]:\n zipfolder = ZipFile(zip_path)\n df_dict = {}\n for csv_info in zipfolder.infolist():\n csv_name = csv_info.filename\n unzipped = zipfolder.open(csv_name)\n df = _load_csv_into_df(unzipped, csv_name)\n df_dict[csv_name] = df\n\n assert len(df_dict) == len(zipfolder.infolist()) # TODO: maybe check / log function\n\n return df_dict", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the 
observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')", "def load_subvolume(base_path, subvolume, group, fields, matches, flag):\n result = {}\n\n with h5py.File(file_path(base_path, subvolume, 'subvolume'), 'r') as f:\n if flag:\n if not fields:\n fields = list(f[group].keys())\n\n for field in fields:\n if field not in f[group].keys():\n raise Exception(\"Catalog does not have requested field [{}]!\".format(field))\n\n for field in fields:\n result[field] = f[group][field][:]\n\n if matches:\n result = {**result, **load_matches(base_path, subvolume, group)}\n\n return result", "def get_data(self):\n\n all_data = OrderedDict()\n projects = [Path(proj) for proj in glob(str(self.data_path.joinpath(\"*\"))) if Path(proj).is_dir()]\n\n for project in projects:\n files = []\n \n # Read all csv files and save them as a list in files\n for ver in glob(str(project.joinpath(\"*.csv\"))):\n files.extend(pd.read_csv(ver, usecols=['time', 'buggy']).values.tolist())\n \n # Create a pandas dataframe from the csv sorted by datetime\n df = pd.DataFrame(files, columns=['Time', 'Bugs']).sort_values(by='Time').reset_index(drop=True)\n \n # Convert time to Pandas DateTime format\n df['Time'] = pd.to_datetime(df['Time']) \n \n # Group bug counts by week starting on monday\n df = df.reset_index().set_index('Time').groupby(\n [pd.Grouper(freq='W-MON')])[\"Bugs\"].sum().astype(int).reset_index()\n \n df = df.set_index('Time')\n # Save the data to dictionary\n all_data.update(OrderedDict({project.name: df}))\n\n return all_data", "def _buildindex( self ):\n try:\n import ROOT as rt\n except:\n print \"Could not load ROOT\"\n sys.exit(-1)\n \n # sigh. this is a mess\n self.producers = [] # all producer names found in ROOT files\n self.datatypes = [] # all data types\n self.flavors = [] # flavor = hash of string listing set of trees found in a given file\n self.flavor_def = {} # map from flavor to list of tree names\n self.rawdigits_entrymap = {} # only used if file type is raw digits. 
maps rse to (position,wfms) in data tree\n self.rawdigits_tpcindex = {}\n flavor_eventset = {}\n eventsets = []\n events_to_files = {}\n events_to_flavors = {}\n\n # this loop is going into each file in our list and\n # - taking the list of trees in the file and making a has out of their names\n # - this hash is used to define the 'flavor' of the file\n # - we also make a list of events in the tree, labeling each entry with (run,subrun,event) ID\n # - we keep track of such list of entries and group files (and flavors) with the same event list\n # - determine filetype: LArCV or LArLite\n self.filetype = None\n for f in self.larlitefilelist:\n r = rt.TFile(f)\n nfkeys = r.GetListOfKeys().GetEntries()\n\n # now here we parse the type of objects in the ROOT file\n # we are looking to determine three file types supported by pylard\n # (1) larlite (2) larcv (3) rawdigitreader\n trees = []\n for i in range(nfkeys):\n keyname = r.GetListOfKeys().At(i).GetName()\n if keyname==\"larlite_id_tree\":\n found_id_tree = True\n elif \"_tree\" in keyname:\n producer = keyname.split(\"_\")[1]\n dtype = keyname.split(\"_\")[0]\n if producer not in self.producers:\n self.producers.append( producer )\n if dtype not in self.datatypes:\n self.datatypes.append( dtype )\n elif \"rawdigitwriter\" in keyname:\n trees.append( \"rawdigitwriter/RawDigits\" )\n trees.append( \"rawdigitwriter/OpDetWaveforms\" )\n trees.append( \"rawdigitwriter/IndexRawDigits\" )\n trees.append( \"rawdigitwriter/IndexOpDetWfms\" )\n if keyname not in trees:\n trees.append(keyname)\n hashstr = \"\"\n trees.sort()\n for keyname in trees:\n hashstr += keyname +\";\"\n\n # determine filetype from type of keys we see\n is_supported_rootfile = False\n idtreename = None\n if \"larlite_id_tree\" in trees:\n thisfiletype = \"LARLITE\"\n is_supported_rootfile = True\n if \"image2d\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"partroi\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"rawdigitwriter/OpDetWaveforms\" in trees:\n thisfiletype = \"RAWDIGITS\"\n is_supported_rootfile = True\n if not is_supported_rootfile:\n continue\n\n if self.filetype is not None and self.filetype!=thisfiletype:\n print \"Error in parsing filelist: Cannot mix filetypes (LArCV/LArLite/RawDigitTree)\"\n return\n elif self.filetype is None:\n self.filetype = thisfiletype\n \n # now we determine the idtree to use\n if self.filetype==\"LARLITE\":\n idtreename = \"larlite_id_tree\"\n elif self.filetype==\"LARCV\":\n if self.loaded_larcv == False:\n s = time.time()\n import larcv as larcv\n print \"LOADING LARCV: \",time.time()-s,\"secs\"\n self.loaded_larcv = True\n for treename in trees:\n if \"image2d\" in treename:\n if idtreename is None:\n idtreename = treename\n else:\n pass # we only use this if we have to\n if \"partroi\" in treename:\n idtreename = treename # we prefer to use this tree for speed\n break\n elif self.filetype==\"RAWDIGITS\":\n idtreename = \"rawdigitwriter/IndexOpDetWfms\"\n\n if idtreename is None:\n print \"Error: Could not setup a proper ID tree for this file\"\n continue\n\n # now we parse the tree contents. 
define a flavor for it based on all the trees\n # we also get the (run,subrun,event) id for the event\n m = hashlib.md5()\n m.update(hashstr)\n flavor = m.digest()\n if flavor not in self.flavors:\n self.flavors.append( flavor )\n flavor_eventset[flavor] = []\n self.flavor_def[flavor] = hashstr\n if self.filetype==\"LARLITE\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"LARCV\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"RAWDIGITS\":\n idtree = r.Get(idtreename)\n \n eventset = [] # list of events\n for n in range(idtree.GetEntries()):\n idtree.GetEntry(n)\n if self.filetype==\"LARLITE\":\n rse = ( idtree._run_id, idtree._subrun_id, idtree._event_id )\n elif self.filetype==\"LARCV\":\n idbranchname = idtreename.replace(\"_tree\",\"_branch\")\n idbranch = None\n exec(\"idbranch=idtree.%s\"%(idbranchname))\n rse = ( idbranch.run(), idbranch.subrun(), idbranch.event() )\n elif self.filetype==\"RAWDIGITS\":\n rse = ( idtree.idx_run, idtree.idx_subrun, idtree.idx_event )\n self.rawdigits_entrymap[rse] = (idtree.entrystart, idtree.nentries )\n eventset.append(rse)\n if rse not in flavor_eventset[flavor]:\n flavor_eventset[flavor].append( rse )\n else:\n raise ValueError( \"found a repeated run/subrun/event index (%s). what?\"%( str(rse) ) )\n if self.filetype==\"RAWDIGITS\":\n # rawdigits has another tree index for the TPC\n tpcindex = r.Get(\"rawdigitwriter/IndexRawDigits\")\n for n in range(tpcindex.GetEntries()):\n tpcindex.GetEntry(n)\n rse = ( tpcindex.idx_run, tpcindex.idx_subrun, tpcindex.idx_event )\n self.rawdigits_tpcindex[rse] = (tpcindex.entrystart, tpcindex.nentries)\n \n eventset = tuple(eventset)\n if eventset not in events_to_files:\n events_to_files[eventset] = {}\n events_to_flavors[eventset] = []\n eventsets.append( eventset )\n events_to_files[eventset][flavor] = f\n events_to_flavors[eventset].append( flavor )\n del idtree\n r.Close()\n self.parsed = True\n\n # now we take our collection of event lists and\n # - sort the event lists\n # - make lists of files with the same set of events in the order of the sorted event list\n # - for each list we also make a dictionary between (run,subrun,event) index to the entry number\n # - we pick the list with the biggest number of events as the \"official\" file list\n eventsets.sort()\n flavorfiles = {}\n flavorsets = []\n\n flavorset_rse_dict = {}\n flavorset_entry_dict = {}\n for eventset in eventsets:\n events_to_flavors[eventset].sort() # sort the flavors with this event-set\n flavorset = tuple( events_to_flavors[eventset] )\n if flavorset not in flavorfiles:\n flavorfiles[flavorset] = []\n flavorsets.append(flavorset)\n flavorset_rse_dict[flavorset] = {}\n flavorset_entry_dict[flavorset] = {}\n for flavor in flavorset:\n flavorfiles[flavorset].append( events_to_files[eventset][flavor] )\n for rse in eventset:\n ientry = len( flavorset_rse_dict[flavorset] )\n flavorset_rse_dict[flavorset][rse] = ientry\n flavorset_entry_dict[flavorset][ientry] = rse\n\n # look for largest fileset\n maxset = None\n nfiles = 0\n for fset in flavorsets:\n n = len(flavorfiles[fset])\n if n>nfiles:\n nfiles = n\n maxset = fset\n # these are the final file list and event dictionary we want\n self.sorted_filelist = flavorfiles[maxset]\n self.rse_dict = flavorset_rse_dict[maxset]\n self.entry_dict = flavorset_entry_dict[maxset]\n\n # for rawdigits, we also build the entry to data map\n if self.filetype==\"RAWDIGITS\":\n treepos = 0\n treepos_tpc = 0\n for entry in range(len(self.entry_dict)):\n rse = self.entry_dict[entry] \n # update 
OPDET tree\n pos_entries = self.rawdigits_entrymap[rse] # pos is from start of file, nentries is for the event block\n merged_pos_entries = ( treepos, pos_entries[1] )\n treepos += pos_entries[1]\n self.rawdigits_entrymap[rse] = merged_pos_entries # update \n # update TPC tree\n pos_entries = self.rawdigits_tpcindex[rse]\n merged_pos_entries = ( treepos_tpc, pos_entries[1] )\n treepos_tpc += pos_entries[1]\n self.rawdigits_tpcindex[rse] = merged_pos_entries # update", "def files(self, group):\n raise NotImplementedError", "def load_data(folder_path: str) -> dict:\n # iterate over all files in each dataset folder\n data_dict = dict()\n for filename in os.listdir(folder_path):\n print(f'{strftime(\"%a, %d %b %Y %H:%M:%S\", gmtime())} load {filename} from {folder_path}')\n if filename == '.DS_Store':\n continue\n # connect all part of files of the same dataset\n file_path = os.path.join(folder_path, filename)\n file = joblib.load(file_path)\n data_dict[filename.split('.', 1)[0]] = file\n\n len_df = pd.DataFrame(data=data_dict[f'branches_lengths_list'],\n index=data_dict[f'branch_comments_embedded_text_df'].index)\n data_dict['len_df'] = len_df\n\n return data_dict", "def __init__(self):\n groups = [\n os.path.splitext(f)[0] for f in os.listdir(data_dir) if f.endswith(\".json\")\n ]\n\n self._data = {\n group: IndicatorGroup.parse_file(os.path.join(data_dir, f\"{group}.json\"))\n for group in groups\n }", "def merge_groups(loop_ds, group_map, da_name, group_dim='sample_id', group_n_dim='group_n'):\n cell_count = loop_ds.coords[group_n_dim].to_pandas()\n loop_ds[da_name] = loop_ds[da_name] * loop_ds.coords[group_n_dim]\n\n loop_ds['_sample_group'] = group_map\n loop_ds = loop_ds.groupby('_sample_group').sum(dim=group_dim)\n\n sample_group_count = cell_count.groupby(group_map).sum()\n sample_group_count.index.name = '_sample_group'\n loop_ds.coords[group_n_dim] = sample_group_count\n\n loop_ds[da_name] = loop_ds[da_name] / loop_ds[group_n_dim]\n\n loop_ds = loop_ds.rename({\n '_sample_group': group_dim\n })\n return loop_ds", "def _merge_groups(self):\n fof_rdd = self.fof_rdd\n nPartitions = self.nPartitions\n \n def remap_local_groups(iterator): \n gmap = iterator.next() \n for p_arr in iterator:\n remap_gid_partition_cython(p_arr, gmap)\n yield p_arr\n\n mapping = self._get_level_map()\n\n group_merge_map = (mapping.flatMap(lambda (g,g_p):\n [(gid, (g,g_p)) for gid in [decode_partition(g), decode_partition(g_p)]])\n .partitionBy(nPartitions)\n .map(lambda (k,v): v, preservesPartitioning=True)\n .mapPartitions(create_map_dict, True)).cache() \n\n merged_rdd = (group_merge_map + fof_rdd).mapPartitions(remap_local_groups, preservesPartitioning=True)\n merged_rdd.setName('merged_rdd')\n\n self.group_merge_map = group_merge_map\n\n return merged_rdd", "def generate_total_summary_table(self, groups, table_name=None, group_names=None):\n groups['chunks'].sort(key=lambda chunk: chunk['id'])\n\n patch_paths = []\n for chunk in groups['chunks']:\n patch_paths.extend(chunk['imgs'])\n \n category_names = [c.name for c in self.CategoryEnum]\n patches = {name: set() for name in category_names}\n slides = {name: set() for name in category_names}\n patients = {name: set() for name in category_names}\n all_patches = set()\n all_slides = set()\n all_patients = set()\n cum_header = 'Overall' if self.is_binary else 'Total'\n headers = category_names + [cum_header]\n num_headers = len(headers)\n total_patches = pd.DataFrame(columns=headers)\n total_slides = pd.DataFrame(columns=headers)\n total_patients = 
pd.DataFrame(columns=headers)\n patient_patches = pd.DataFrame(columns=headers)\n slide_patches = pd.DataFrame(columns=headers)\n patient_slides = pd.DataFrame(columns=headers)\n for patch_path in patch_paths:\n patch_id = utils.create_patch_id(patch_path, self.patch_pattern)\n label = utils.get_label_by_patch_id(patch_id, self.patch_pattern,\n self.CategoryEnum, is_binary=self.is_binary).name\n slide_name = utils.get_slide_by_patch_id(patch_id, self.patch_pattern)\n patient_id = utils.get_patient_by_slide_id(slide_name,\n dataset_origin=self.dataset_origin)\n\n patches[label].add(patch_id)\n\n if slide_name not in slides[label]:\n if patient_id not in patient_slides.index:\n patient_slides.loc[patient_id] = [0] * num_headers\n patient_slides.at[patient_id, label] += 1\n if slide_name not in all_slides:\n patient_slides.at[patient_id, cum_header] += 1\n \n slides[label].add(slide_name)\n patients[label].add(patient_id)\n\n if patient_id not in patient_patches.index:\n patient_patches.loc[patient_id] = [0] * num_headers\n patient_patches.at[patient_id, label] += 1\n if patient_id not in all_patients:\n patient_patches.at[patient_id, cum_header] += 1\n\n if slide_name not in slide_patches.index:\n slide_patches.loc[slide_name] = [0] * num_headers\n slide_patches.at[slide_name, label] += 1\n if slide_name not in all_slides:\n slide_patches.at[slide_name, cum_header] += 1\n\n all_patches.add(patch_id)\n all_slides.add(slide_name)\n all_patients.add(patient_id)\n \n for label, s in patches.items():\n total_patches['Total', label] = len(s)\n total_patches['Total', cum_header] = len(all_patches)\n for label, s in slides.items():\n total_slides['Total', label] = len(s)\n total_slides['Total', cum_header] = len(all_slides)\n for label, s in patients.items():\n total_patients['Total', label] = len(s)\n total_patients['Total', cum_header] = len(all_patients)\n\n patient_patches.loc[\"Total\"] = patient_patches.sum().astype(int)\n slide_patches.loc[\"Total\"] = slide_patches.sum().astype(int)\n patient_slides.loc[\"Total\"] = patient_slides.sum().astype(int)\n\n return {\n 'slide_patches': slide_patches,\n 'patient_patches': patient_patches,\n 'patient_slides': patient_slides,\n 'total_patches': total_patches,\n 'total_slides': total_slides,\n 'total_patients': total_patients,\n }", "def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = 
{'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = {'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = 
{'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = ['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', #'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)", "def prepare_data(groups):\n all_dicts = []\n for idx, group in groups:\n res_dict = {'organism': group.organism.iloc[0]}\n for g_idx, row in group.iterrows():\n if pd.notna(row.label):\n res_dict[row.cmp_name] = {'label': row.label, 'mic': row.MIC}\n else:\n res_dict[row.cmp_name] = {'label': '', 'mic': row.MIC}\n all_dicts.append(res_dict)\n return all_dicts", "def recursively_load_dict_contents_from_group(h5file: \"h5py.File\", \n path: str,\n ) -> dict:\n ans = {}\n for key, item in h5file[path].items():\n if isinstance(item, h5py._hl.dataset.Dataset):\n ans[key] = item.value\n elif isinstance(item, h5py._hl.group.Group):\n ans[key] = recursively_load_dict_contents_from_group(h5file, f\"{path}{key}/\")\n return ans", "def process_data(self):\n structure_data = self.parse_root(self.root)\n\n dict_data = {}\n for d in structure_data:\n dict_data = {**dict_data, **d}\n df = pd.DataFrame(data=list(dict_data.values()), index=dict_data.keys()).T\n\n return df", "def get_timeseries(root_dir, select_keywords=None, drop_keywords=None,\n names=None, only_wells=None):\n filenames = get_filenames(root_dir)\n\n filenames = select_filenames(filenames, select_keywords, drop_keywords)\n\n data = {}\n start_time = time()\n for ifl, (fileid, file) in enumerate(\n filenames[['file_id', 'file_name']].values):\n file_time = time()\n print('Reading timeseries from file {} of {}'.format(\n ifl+1, filenames.shape[0]))\n timeseries = read_timeseries(file, names=names, only_wells=only_wells)\n data[fileid] = timeseries\n print('File read in {} sec.'.format(time()-file_time))\n print('Done reading in {} sec.'.format(time()-start_time))\n\n return filenames, data", "def merge_physdfs(files, mode='basic'):\n\ttemp_df = pd.read_csv(files[0], index_col=False)\n\tcolumns = temp_df.columns.tolist()\n\tmerged_df = pd.DataFrame([], columns=columns)\n\n\tind = 1\n\ttot = len(files)\n\tfor file in 
files:\n\t\tprint(\"Merging (%d/%d): %s\" % (ind, tot, file))\n\t\tind = ind + 1\n\n\t\tdf = pd.read_csv(file, index_col=False)\n\n\t\t# add 'rat_data' column to the merged df\n\t\troot_name = file.split('/')[-1]\n\t\tdf = df.assign(raw_data=root_name)\n\n\t\t# add 'exp_label' column to the merged df\n\t\tif mode=='basic':\n\t\t\texp = re.findall(r'[a-zA-Z]{3}\\d{1}', file)\n\t\t\tdf = df.assign(exp_label=exp[0][:-1])\n\n\t\tif mode=='general':\n\t\t if 'cohort' in root_name:\n\t\t df = df.assign(exp_label=root_name[0:8])\n\t\t else:\n\t\t m = root_name.find('_') + 1\n\t\t n = root_name.find('_', m)\n\t\t df = df.assign(exp_label=root_name[m:n])\n\n\t\tif mode=='mengdi':\n\t\t\tm = root_name.find('_') + 1\n\t\t\tm = root_name.find('_', m) + 1\n\t\t\tn = root_name.find('-', m)\n\t\t\tdf = df.assign(exp_label=root_name[m:n])\n\n\t\tif mode=='stiffness':\n\t\t\tm = root_name.find('-') + 1\n\t\t\tm = root_name.find('-', m) + 1\n\t\t\tn = root_name.find('_') + 1\n\t\t\tn = root_name.find('_', n)\n\t\t\tdf = df.assign(exp_label=root_name[m:n])\n\n\t\tmerged_df = pd.concat([merged_df, df], sort=True, ignore_index=True)\n\n\treturn merged_df", "def grouping(filename, outdir, minsog, maxsog):\n records = Records(Extractor.extract_records(filename))\n\n groups = records.group(minsog, maxsog)\n for key in groups:\n rw = RecordsWriter(groups[key])\n rw.write_to_dir(key + \".fasta\", outdir)", "def get_filenames(self, root_folder):\n t_images = []\n t_labels = []\n v_images = []\n v_labels = []\n if not os.path.isfile('filenames.p'):\n for dirName, subdirList, fileList in os.walk(root_folder):\n print('Collecting ' + dirName)\n files = [fname for fname in fileList]\n if len(files) > 0:\n if 'Depth' in dirName:\n if any(subj in dirName for subj in self.val_subjects):\n v_labels.extend([os.path.join(dirName, fname) for fname in fileList])\n else:\n t_labels.extend([os.path.join(dirName, fname) for fname in fileList])\n elif 'Color' in dirName:\n if any(subj in dirName for subj in self.val_subjects):\n v_images.extend([os.path.join(dirName, fname) for fname in fileList])\n else:\n t_images.extend([os.path.join(dirName, fname) for fname in fileList])\n pickle.dump([sorted(t_images), sorted(t_labels), sorted(v_images), sorted(v_labels)], open('filenames.p', 'wb'))\n return sorted(t_images), sorted(t_labels), sorted(v_images), sorted(v_labels)\n else:\n return pickle.load(open('filenames.p', 'rb'))", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def load_grouping():\n if os.path.isfile('grouping.json'):\n logger.debug(\"Grouping file exists. 
Loading..\")\n with open('grouping.json', 'r+') as f:\n try:\n grouping_json = json.loads(f.read())\n except ValueError:\n grouping_json = json.loads(\"{}\")\n logger.debug(\"Error parsing grouping.json.\")\n else:\n grouping_json = json.loads(\"{}\")\n return grouping_json", "def process_traces(subdirs,dates,load_path):\n\n N = 60*60*24*len(dates)*10\n\n firing_rates_storage = np.zeros((N))\n var_storage = np.zeros((N))\n position_storage = np.zeros((N,2))\n firing_rates_storage[:] = np.nan\n var_storage[:] = np.nan\n timestamps = np.zeros((N))\n clusters = np.zeros((N))\n pk_max = 0\n n=0\n\n for subdir,date in zip(subdirs,dates):\n \n dpk = pk_max \n path = load_path+'/%s/'%subdir\n file = [i for i in os.listdir(path) if '.pkl' in i] \n \n if len(file) == 0:\n continue\n \n pd_ob = pkl.load(open(path+file[0],'rb'))\n \n positions = pd_ob['positions']\n sts = pd_ob['sts']\n isis = pd_ob['isis']\n fsts = pd_ob['fsts']\n fisis = pd_ob['fisis']\n et = pd_ob['et']\n ep = pd_ob['ep']\n \n max_time = 0\n for k,v in sts.items():\n max_time = max(max_time,np.max(v))\n \n for t in np.arange(0,np.floor(max_time)):\n\n for i,pk in enumerate(sts.keys()):\n if np.count_nonzero((sts[pk]>t) & (sts[pk]<(t+1))) > 1:\n\n p = positions[pk][:-1]\n\n x = sts[pk]\n y = isis[pk]\n fx = fsts[pk]\n fy = fisis[pk]\n\n firing_rates_storage[n] = np.nanmean(y[(x>t) & (x<t+1)])\n var_storage[n] = np.nanvar(y[(x>t) & (x<t+1)])\n position_storage[n] = np.nanmean(p[(x>t) & (x<t+1)],axis=0)\n timestamps[n] = (date + timedelta(0,int(t))).timestamp()\n clusters[n] = pk + dpk\n n=n+1\n pk_max = max(pk_max,pk+dpk)\n\n firing_rates_storage = firing_rates_storage[:n]\n var_storage = var_storage[:n]\n position_storage = position_storage[:n]\n timestamps = timestamps[:n]\n clusters = clusters[:n]\n\n np.savez(load_path+'processed_traces.npz',frs=firing_rates_storage,vs=var_storage,pos=position_storage,ts=timestamps,cl=clusters)\n return 0", "def get_file_df(self, file_list):\n file_dict = {\n file.split(\".\")[0]: {\"Date\": file.split(\".\")[1], \"File\": file}\n for file in file_list\n }\n df = pd.DataFrame(file_dict).T\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n df[\"File\"] = df[\"File\"].astype(\"string\")\n df = df.reset_index()\n df.rename(columns={\"index\": \"League\"}, inplace=True)\n df = df.sort_values(by=[\"Date\"], ascending=False)\n return df", "def build_dataset_join_dfs(pset_dict, pset_name, primary_dfs={}):\n cell_df = primary_dfs['cell'] if 'cell' in primary_dfs else None\n tissue_df = primary_dfs['tissue'] if 'tissue' in primary_dfs else None\n compound_df = primary_dfs['drug'] if 'drug' in primary_dfs else None\n\n join_dfs = {}\n join_dfs['dataset_cell'] = build_dataset_cell_df(\n pset_dict, pset_name, cell_df)\n join_dfs['dataset_tissue'] = build_dataset_tissue_df(\n pset_dict, pset_name, tissue_df)\n join_dfs['dataset_compound'] = build_dataset_compound_df(\n pset_dict, pset_name, compound_df)\n return join_dfs", "def datamerge_run(filenames, outdir, roc_cols):\n \n tbldict = collect2dict(filenames, outdir)\n tbldict = cogtest_manipulation(tbldict, roc_cols)\n \n #count number of tps\n tbldict['cogtests'] = count_instances(tbldict['cogtests'], 'codeb', 'NP_NoTps')\n tbldict['aseg_change'] = count_instances(tbldict['aseg_change'], 'codea', 'MRI_NoTps')\n tbldict['pibparams'] = count_instances(tbldict['pibparams'], 'codea', 'PIB_NoTps')\n \n new_tbldict = {}\n for key, tbl in tbldict.iteritems():\n tpcol = [s for s in tbl.columns if ('_Tp' in s)]\n if tpcol:\n tpcol = tpcol[0]\n tblflat, tblflatnm = 
flatten(tbl, tpcol, key, [1, '1'])\n new_tbldict[tblflatnm] = tblflat\n tbldict.update(new_tbldict)\n \n #make sure each table contains SubjID and BAC# fields\n for key, tbl in tbldict.iteritems():\n tbl = addcodes(tbl, tbldict['codetranslator'])\n tbldict[key] = tbl\n \n #merge tables\n tblstojoin = ['cogtests_flat','pibparams_flat','aseg_change_flat','fdg_metaroi_flat','subjinfo']\n joincol = ['codea','codeb']\n subjtbl = mergelots(tbldict, tblstojoin, joincol)\n \n #merge tables\n tblstojoin = ['cogtests','subjinfo','pibparams_flat','aseg_change_flat','fdg_metaroi_flat']\n joincol = ['codea','codeb']\n NPtbl = mergelots(tbldict, tblstojoin, joincol)\n \n cf.save_xls_and_pkl(subjtbl, 'subjtbl', outdir)\n cf.save_xls_and_pkl(NPtbl, 'NPtbl', outdir)\n \n return tbldict, NPtbl, subjtbl", "def create_meta_df_valSet(srcFolderPath, labelsPath, destFolderPath, stop='all'):\n # iterate over file in subfolders in labelsPath and retrieve the key values from each\n sfp_lst = os.listdir(labelsPath)\n infiles, outfiles, nFrames = [], [], []\n traversed_tot = 0\n for sf in sfp_lst:\n traversed = 0\n sub_lab_path = os.path.join(labelsPath, sf)\n #sub_src_path = os.path.join(srcFolderPath, sf)\n sub_dest_path = os.path.join(destFolderPath, sf)\n if os.path.isdir(sub_lab_path):\n # create destination path to store the npy file\n if not os.path.exists(sub_dest_path):\n os.makedirs(sub_dest_path)\n \n labfiles = os.listdir(sub_lab_path)\n for lfile in labfiles:\n # if lfile is a json file, then get the subpath key and append\n if os.path.isfile(os.path.join(sub_lab_path, lfile)) and \\\n lfile.rsplit('.', 1)[1] in {'json', 'csv'}:\n lfilepath = os.path.join(sub_lab_path, lfile)\n with open(lfilepath, 'r') as fp:\n label = json.load(fp)\n if label is not None and len(label.keys()) > 0:\n src_file = os.path.join(srcFolderPath, label.keys()[0])\n dest_file = os.path.join(destFolderPath, \\\n label.keys()[0].rsplit('.', 1)[0]+\".npy\")\n if os.path.isfile(dest_file):\n print(\"Feats already present : {}\".format(dest_file))\n continue\n infiles.append(src_file)\n outfiles.append(dest_file)\n nFrames.append(getTotalFramesVid(src_file))\n traversed += 1\n \n if stop != 'all' and traversed == stop:\n break\n traversed_tot += traversed\n \n print \"No. 
of files to be written to destination : \"+str(traversed_tot)\n if traversed_tot == 0:\n print \"Check the structure of the dataset folders !!\"\n return traversed_tot\n\n ###########################################################################\n #### Form the pandas Dataframe and parallelize over the files.\n filenames_df = pd.DataFrame({\"infiles\":infiles, \"outfiles\": outfiles, \"nframes\": nFrames})\n filenames_df = filenames_df.sort_values([\"nframes\"], ascending=[True])\n filenames_df = filenames_df.reset_index(drop=True)\n \n return filenames_df", "def load_group(self, group_name):\n self.sorted = False\n self.grouped = False\n self.nwb_path_list = dict()\n self.labels = []\n for path_list in {group_name: self.all_groups.get(group_name)}.values():\n for path in path_list:\n io = NWBHDF5IO(path, 'r')\n nwb_file = io.read()\n self.labels.append(nwb_file.identifier)\n self.nwb_path_list.update({nwb_file.identifier: path})\n self.musketeers_widget.session_widget.populate(self.labels)\n self.musketeers_widget.session_widget.update_text_filter()\n self.groupMenu.setEnabled(True)\n self.sortMenu.setEnabled(True)", "def get_data_parascans(rootdir, datasetnames, filterdata):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Original images (to predict)\n images_org = load_scans(rootdir + dataset + '/crop_org')\n\n # Ground truth images (mask image of expert)\n images_gt = load_scans(rootdir + dataset + '/crop_gt')\n images_gt = sitk.GetArrayFromImage(images_gt)\n\n # Smoothed images by specific filter\n images_smoothed = load_scans_filter(images_org, filterdata)\n\n # Save images in datasets dictionary\n datasets.update({dataset : {'org': images_org, 'gt': images_gt, 'smoothed': images_smoothed}})\n\n print(\"datasets created\")\n return datasets", "def search_load_toa5df(api_token, base_url, search_params, biggish_data=False,\n keep_files=False, multiple_delim=False,\n dst_folder='./raw_data'):\n # search records\n records = search(api_token, base_url, search_params)\n\n # use 'biggish data' mode\n if biggish_data:\n # set and create download folder if it does not exist\n dst_folder = Path(dst_folder)\n if not dst_folder.is_dir():\n os.makedirs(dst_folder)\n\n # display number of files beeing downloaded\n print(f'Downloading {len(records)} files:')\n\n # build download url for each file\n for record in tqdm.tqdm(records):\n download_url = f\"{record['url']}?auth_token={api_token}\"\n\n # check if file exists, if not downloads\n file_path = dst_folder / record['filename']\n if not file_path.is_file():\n urllib.request.urlretrieve(download_url, file_path)\n\n # create empty dataframe to store final data\n df_all = pd.DataFrame()\n\n # loop through all downloaded files\n for i in list(dst_folder.glob('*.dat')):\n\n # read data into dataframe discarding undesired header columns\n if multiple_delim:\n df = pd.read_csv(i, skiprows=[0, 2, 3], na_values='NAN',\n sep='\\\\t|,|;', engine='python')\n df.columns = [i.replace('\"', \"\") for i in df.columns]\n df['TIMESTAMP'] = df['TIMESTAMP'].str.replace('\"', '')\n else:\n df = pd.read_csv(i, skiprows=[0, 2, 3], na_values='NAN')\n\n # generate datetimeindex\n df = df.set_index('TIMESTAMP')\n df.index = pd.to_datetime(df.index)\n\n # optimize memory usage\n # first get names of float, integer and object columns\n float_cols = df.select_dtypes(include=['float64']).columns\n integer_cols = df.select_dtypes(include=['int64']).columns\n object_cols = 
df.select_dtypes(include=['object']).columns\n # the assign dtype that uses least memory for each column\n df[integer_cols] = df[integer_cols].apply(\n pd.to_numeric, downcast='integer')\n df[float_cols] = df[float_cols].apply(\n pd.to_numeric, downcast='float')\n # converting objects to category is only more memory efficient if\n # less tha 50% of values are unique\n for col in object_cols:\n num_unique_values = len(df[col].unique())\n num_total_values = len(df[col])\n if num_unique_values / num_total_values < 0.5:\n df[col] = df[col].astype('category')\n\n # append data\n df_all = pd.concat([df_all, df], sort=False)\n\n # delete dst_folder if wanted\n if not keep_files:\n shutil.rmtree(dst_folder)\n\n else:\n # print number of records found\n print(f'Loading {len(records)} files:')\n\n # create empty dataframe to save data in\n df_all = pd.DataFrame()\n\n # loop through all records and generate progressbar\n for record in tqdm.tqdm(records):\n # build download url for each file\n download_url = f\"{record['url']}?auth_token={api_token}\"\n # get data\n req = urllib.request.urlopen(download_url)\n data = req.read()\n\n # read data into dataframe discarding undesired header columns\n if multiple_delim:\n df = pd.read_csv(io.StringIO(data.decode('utf-8')),\n skiprows=[0, 2, 3], na_values='NAN',\n sep='\\\\t|,|;', engine='python')\n df.columns = [i.replace('\"', \"\") for i in df.columns]\n df['TIMESTAMP'] = df['TIMESTAMP'].str.replace('\"', '')\n else:\n df = pd.read_csv(io.StringIO(data.decode('utf-8')),\n skiprows=[0, 2, 3], na_values='NAN')\n\n # generate datetimeindex\n df = df.set_index('TIMESTAMP')\n df.index = pd.to_datetime(df.index)\n\n # infer data types of all other columns\n df = df.infer_objects()\n\n # append data\n df_all = pd.concat([df_all, df], sort=False)\n\n # if from_date provided sort and trim data\n if 'from_date' in search_params:\n df_all = df_all.sort_index()[search_params['from_date']:]\n # if to_date provided sort and trim data\n if 'to_date' in search_params:\n df_all = df_all.sort_index()[:search_params['to_date']]\n\n return df_all.drop_duplicates()", "def load_results(fnames=None, base_fname='figure_data_'):\n\n if fnames==None:\n fnames = glob.glob(base_fname + '*.npz')\n\n num_subfunctions = None\n full_objective_period = None\n\n history_nested = {}\n for fn in fnames:\n data = np.load(fn)\n if num_subfunctions is None:\n num_subfunctions = data['num_subfunctions']\n full_objective_period = data['full_objective_period']\n if not (num_subfunctions == data['num_subfunctions'] and full_objective_period == data['full_objective_period']):\n print \"****************\"\n print \"WARNING: mixing data with different numbers of subfunctions or delays between evaluating the full objective\"\n print \"make sure you are doing this intentionally (eg, for the convergence vs., number subfunctions plot)\"\n print \"****************\"\n model_name = data['model_name'].tostring()\n print(\"loading\", model_name)\n if not model_name in history_nested:\n history_nested[model_name] = data['history'][()].copy()\n else:\n print(\"updating\")\n for subkey in history_nested[model_name].keys():\n print subkey\n history_nested[model_name][subkey].update(data['history'][()].copy()[subkey])\n data.close()\n\n return history_nested, num_subfunctions, full_objective_period", "def _get_group_example_data(self, data_group_id: str) -> Dict[\n str, dict\n ]:\n return {\n e['example_id']: self._get_example_data(e['example_id'])\n for e in self.tasks['data_groups'][data_group_id]\n }", "def 
get_files(self):\n def _get_files_by_names(files, name_set, postfix):\n ret = []\n for f in files: \n name = osp.basename(f).split(\"_%s\" % postfix)[0]\n if name in name_set:\n ret.append(f)\n return ret\n\n frame1_files = sorted(glob.glob(osp.join(self.root, 'images', \"*_pre_disaster*\")))\n frame2_files = sorted(glob.glob(osp.join(self.root, \"images\", \"*_post_disaster*\")))\n label_files = sorted(glob.glob(osp.join(self.root, \"masks\", \"*_change*\")))\n assert len(frame1_files) == len(frame2_files) == len(label_files), \\\n \"%d, %d, %d\" % (len(frame1_files), len(frame2_files), len(label_files))\n\n file_names = [osp.basename(f).split(\"_pre\")[0] for f in frame1_files]\n file_names = sorted(list(set(file_names)))\n if self.isTrain:\n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[0]\n else: \n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[1]\n self.frame1_files = _get_files_by_names(frame1_files, name_set, 'pre')\n self.frame2_files = _get_files_by_names(frame2_files, name_set, 'post')\n self.label_files = _get_files_by_names(label_files, name_set, 'change')", "def process_group(pattern, params):\n # check subdirectory according to filter options\n subdir = params['label']\n # and the parameters label\n print 'Processing:', subdir\n\n # search for videos matching the pattern\n search = os.path.join(ROOT_RAWDATA_DIR, pattern)\n print 'Search pattern:', search\n flist = sorted(glob.glob(search))\n\n # for each matching video\n for f in flist:\n # video structures (copied from LEGOS FTP) is yyyymmdd/HH/MM.mp4\n # and we want to store frames as yyyymmdd/yyyymmdd_HH/yyyymmdd_HHMM/yyyymmdd_HHMM_<index>.<format>\n # so: recursively split to extract basename, hour and date\n p, fname = os.path.split(f)\n p, hour = os.path.split(p)\n p, date = os.path.split(p)\n minute, _ = os.path.splitext(fname)\n # compute output dir, and prefix for frames\n outdir = os.path.join(ROOT_PREPROC_DIR,\n subdir, # according to parameters\n date,\n '{}_{}'.format(date, hour),\n '{}_{}{}'.format(date, hour, minute),\n )\n prefix = '{}_{}{}_'.format(date, hour, minute)\n # create output directory if neeeded\n if not os.path.exists(outdir):\n print 'Creating output directory', outdir\n os.makedirs(outdir, 0755)\n # call decoder\n command = ['python', '-u', 'decoder.py',\n f,\n '-o', outdir,\n '-p', prefix,\n '-l', params['label'],\n '-f', str(params['image_format']),\n '-m', str(params['median_length']),\n '-r', str(params['resolution']),\n '-O', str(params['origin'][0]), str(params['origin'][1]),\n '-d', str(params['dimensions'][0]), str(params['dimensions'][1]),\n '-a', str(params['rotation']),\n ]\n subprocess.call(command)", "def get_data_scans(rootdir, datasetnames, filterdata):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Original images (to predict)\n images_org = load_scans(rootdir + dataset + '/crop_org')\n\n # Smoothed images by specific filter\n images_smoothed = load_scans_filter(images_org, filterdata)\n\n # Save images in datasets dictionary\n datasets.update({dataset : {'org': images_org, 'smoothed': images_smoothed}})\n\n print(\"datasets created\")\n return datasets", "def get_all(folder, filter_funs = []) :\n\n def apply_funs(x, funs) :\n \"\"\"Applies the filter functions.\"\"\"\n res = True\n for f in funs :\n res = f(x)\n if not res :\n break\n return res\n \n final = {}\n files = listdir(folder)\n print(\"Loading Spectras\")\n for f in files :\n 
try :\n spectra = Spectra(folder + \"/\" + f)\n print(\".\", end=\"\")\n except:\n continue\n if spectra == None :\n continue\n if not apply_funs(spectra, filter_funs) :\n continue\n pot_spectra = final.get(spectra.database_id, None)\n if not pot_spectra :\n final[spectra.database_id] = [deepcopy(spectra)]\n else :\n pot_spectra.append(deepcopy(spectra))\n return final", "def load_data():\n data_path = os.path.join('qual-o-mat-data', 'data', '2019', 'europa')\n data_keys = [\"answer\", \"comment\", \"opinion\", \"party\", \"statement\"]\n raw_data = dict()\n all_data = dict()\n\n # Create a dictionary of type <string, DataFrame> that contains the data from all JSON files\n for dk in data_keys:\n json_file = os.path.join(data_path, dk + \".json\")\n with open(json_file, \"r\") as fh:\n raw_data[dk] = json.load(fh)\n all_data[dk] = pd.DataFrame(raw_data[dk])\n\n\n # Based on the opinion data, merge all other data frames on their ID fields to get usable names instead of just ID numbers\n merged_df = all_data[\"opinion\"].copy()\n for to_merge in [\"party\", \"statement\", \"comment\", \"answer\"]:\n merged_df = merged_df.merge(all_data[to_merge], how='inner', left_on=[to_merge], right_on=['id'])\n\n #print(mdf.head())\n return merged_df, all_data, raw_data", "def batchAnalysis(groupfil):\n groups = []\n with open(groupfil, 'r') as fIn:\n for line in fIn:\n groups.append(line.strip().split(','))\n \n checks = ['maxV', 'maxDerivV', 'maxDerivdV', 'minDerivV',\n 'minDerivdV', 'preMinV', 'postMinV', 'preMaxCurveV',\n 'preMaxCurveK', 'postMaxCurveV', 'postMaxCurveK',\n 'height', 'repolarizationV', 'intervals', 'frequencies']\n props = {ch: {gr: {} for gr in list(set([g[1] for g in groups]))}\n for ch in checks} # A dict of dicts\n # props [properties] [group name] [cell name]\n cells = [f[0].split('/')[-1].split('_')[0] for f in groups]\n \n # Add a few more keys\n props['activity'] = {gr: {} for gr in list(set([g[1] for g in groups]))}\n \n # Assign all the properties to the props dict\n for g in groups:\n df = pd.read_csv(g[0])\n df = df.drop('Unnamed: 33', 1) # Garbage\n df = df.drop('freq', 1) # These are downsampled\n df = df.dropna() # Dropna\n \n # If there are multiple clusters, add them in order\n if max(df.clust_inds) == 1: # Two clusters\n numClusts = int(max(df.clust_inds)+1)\n for ch in checks:\n for clust in range(numClusts):\n try:\n props[ch][g[1]][cells[groups.index(g)]].append(df[df['clust_inds']==clust][ch].dropna().values)\n except:\n props[ch][g[1]][cells[groups.index(g)]] = [df[df['clust_inds']==clust][ch].dropna().values]\n else: # Just one cluster\n for ch in checks:\n props[ch][g[1]][cells[groups.index(g)]] = [df[ch].dropna().values]\n # Get activity profile\n tIn, cBouts = timeInClusters(df)\n props['activity'][g[1]][cells[groups.index(g)]] = [tIn, cBouts]\n \n return props", "def _get_check_files(self, group=None, severity=None):\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if (not severity) or severity == sev:\n check_files += Config.get_check_files(group=g,\n names=files,\n severity=sev)\n groups[g] = check_files\n return groups", "def get_filename_df(level, env='stage', pattern=None):\n fnames = get_filenames(level, env=env, pattern=pattern)\n iuvs_fnames = []\n for fname in fnames:\n if not level == 'hk':\n iuvs_fnames.append(ScienceFilename(fname))\n else:\n iuvs_fnames.append(HKFilename(fname))\n df = pd.DataFrame([fname.as_series() for fname in iuvs_fnames])\n if level != 'hk':\n 
df['channel'] = df.channel.astype('category')\n df.set_index('time', inplace=True)\n df.sort_index(inplace=True)\n # next line filters for newest revisions\n return df[df.p.isin(df.groupby('obs_id', sort=False)['p'].max())]", "def get_files(self):\n\n # Grab master data - use existing header, remove unhappy columns\n\n self.df_mas_lab_data = pd.read_csv(\n self.master_csv, dtype=str, usecols=self.columns\n )\n\n # Delete rows, where column FACILITY_TYPE != Independent, Hospital,\n # Physician Office\n facility_type_keep_list = [\"Independent\", \"Hospital\", \"Physician Office\"]\n self.df_mas_lab_data = self.df_mas_lab_data[\n self.df_mas_lab_data[\"FACILITY_TYPE\"].isin(facility_type_keep_list)\n ]\n\n # Make everything a string and remove trailing and leading whitespaces\n self.df_mas_lab_data = self.df_mas_lab_data.astype(str)\n self.df_mas_lab_data = self.df_mas_lab_data.applymap(\n lambda x: x.strip() if isinstance(x, str) else x\n )\n\n print_banner(\"Computing all the Data\")\n print(f\"{len(self.df_mas_lab_data)} original master CLIA labs...\")\n\n # Grab other inputed files to make new data file to compare with\n self.df_new_lab_data = pd.concat(\n [\n pd.read_csv(file, names=self.columns, header=None, dtype=str, usecols=self.columns)\n for file in self.new_files\n ]\n )\n\n # Probably not needed for the new data but just in case:\n # Delete rows, where column FACILITY_TYPE != Independent, Hospital,\n # Physician Office\n self.df_new_lab_data = self.df_new_lab_data[\n self.df_new_lab_data[\"FACILITY_TYPE\"].isin(facility_type_keep_list)\n ]\n\n # Make everything a string and remove trailing and leading whitespaces\n self.df_new_lab_data = self.df_new_lab_data.astype(str)\n self.df_new_lab_data = self.df_new_lab_data.applymap(\n lambda x: x.strip() if isinstance(x, str) else x\n )\n\n print(f\"{len(self.df_new_lab_data)} inputted CLIA labs for comparison...\")", "def parse_directory_of_series_files(self):\n if self.series_base_dir is None or len(self.series_file_list) < 1:\n self.logger.warn('Fatal: Base Directory not set %s')\n raise Exception('Error Base Directory not set')\n\n self.logger.info('Parsing dir of files from %s' % self.series_base_dir)\n\n self.ref_series_df = pd.DataFrame([], columns=['SERIES_ID', 'SERIES_SEQ_ID', 'CONTEXT',\n 'FRAG', 'MOL_ID', 'ACTIVITY'])\n\n required_col = ['SERIES_ID', 'SERIES_SEQ_ID', 'CONTEXT', 'FRAG', 'MOL_ID', 'ACTIVITY']\n max_series_id = 0\n\n for series_file in self.series_file_list:\n\n # print series_file\n temp_df = pd.read_csv(series_file) # , index_col=False)\n # print temp_df.columns\n\n # sanity check the data table for the columns we need\n for col in required_col:\n if col not in temp_df.columns:\n raise Exception(\"Input CSV %s does not have required columns: %s\" % (series_file, col))\n\n # re-sequence the series ID's\n if max_series_id == 0:\n max_series_id = temp_df['SERIES_ID'].max()\n else:\n max_series_id = self.ref_series_df['SERIES_ID'].max()\n # print max_series_id\n\n temp_df['SERIES_ID'] = temp_df['SERIES_ID'] + max_series_id\n temp_df['SOURCE_FILE'] = os.path.basename(series_file)\n\n # py2>3 explicit sort=False added\n self.ref_series_df = self.ref_series_df.append(temp_df, sort=False)\n self.logger.info('Appended dataframe shape %s to master dataframe %s' %\n (str(temp_df.shape), str(self.ref_series_df.shape)))\n # print ('Appended dataframe shape %s to master dataframe %s' % (str(temp_df.shape),\n # str(self.ref_series_df.shape)))\n # print self.ref_series_df['SERIES_ID'].max()\n\n self.series_comparison_df = 
self.ref_series_df", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def parseGroupsFileToDictOfChilden(groups_file):\n return parseGroupsFileToDict(groups_file, \"children\")", "def load(fnames, tag=None, inst_id=None):\n\n all_data = []\n\n # Dst data is actually stored by year but users can load by day.\n # Extract the actual dates from the input list of filenames as\n # well as the names of the actual files.\n fdates = []\n ufnames = []\n for filename in fnames:\n fdates.append(dt.datetime.strptime(filename[-10:], '%Y-%m-%d'))\n ufnames.append(filename[0:-11])\n\n # Get unique filenames that map to actual data\n ufnames = np.unique(ufnames).tolist()\n\n # Load unique files\n for fname in ufnames:\n with open(fname) as open_f:\n lines = open_f.readlines()\n idx = 0\n\n # Check if all lines are good\n max_lines = 0\n for line in lines:\n if len(line) > 1:\n max_lines += 1\n\n # Prep memory\n yr = np.zeros(max_lines * 24, dtype=int)\n mo = np.zeros(max_lines * 24, dtype=int)\n day = np.zeros(max_lines * 24, dtype=int)\n ut = np.zeros(max_lines * 24, dtype=int)\n dst = np.zeros(max_lines * 24, dtype=int)\n\n # Read data\n for line in lines:\n if len(line) > 1:\n temp_year = int(line[14:16] + line[3:5])\n if temp_year > 57:\n temp_year += 1900\n else:\n temp_year += 2000\n\n yr[idx:idx + 24] = temp_year\n mo[idx:idx + 24] = int(line[5:7])\n day[idx:idx + 24] = int(line[8:10])\n ut[idx:idx + 24] = np.arange(24)\n temp = line.strip()[20:-4]\n temp2 = [temp[4 * i:4 * (i + 1)] for i in np.arange(24)]\n dst[idx:idx + 24] = temp2\n idx += 24\n\n # Prep datetime index for the data and create DataFrame\n start = dt.datetime(yr[0], mo[0], day[0], ut[0])\n stop = dt.datetime(yr[-1], mo[-1], day[-1], ut[-1])\n dates = pds.date_range(start, stop, freq='H')\n new_data = pds.DataFrame(dst, index=dates, columns=['dst'])\n\n # Add to all data loaded for filenames\n all_data.append(new_data)\n\n # Combine data together\n data = pds.concat(all_data, sort=True, axis=0)\n\n # Pull out requested days\n data = data.iloc[data.index >= fdates[0], :]\n data = data.iloc[data.index < fdates[-1] 
+ pds.DateOffset(days=1), :]\n\n # Create metadata\n meta = pysat.Meta()\n meta['dst'] = {meta.labels.units: 'nT',\n meta.labels.name: 'Dst',\n meta.labels.notes: tags[tag],\n meta.labels.desc: 'Disturbance storm-time index',\n meta.labels.fill_val: np.nan,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n\n return data, meta", "def FE_add_groupby_features_aggregated_to_dataframe(train,\r\n agg_types,groupby_columns,ignore_variables, test=\"\"):\r\n train_copy = copy.deepcopy(train)\r\n test_copy = copy.deepcopy(test)\r\n if isinstance(groupby_columns, str):\r\n groupby_columns = [groupby_columns]\r\n \r\n for groupby_column in groupby_columns:\r\n train_copy_index = train_copy.index\r\n MGB = My_Groupby_Encoder(groupby_column, agg_types, ignore_variables)\r\n train1 = MGB.fit_transform(train)\r\n addl_cols = left_subtract(train1.columns,train.columns)\r\n train1.index = train_copy_index\r\n train_copy = pd.concat([train_copy,train1[addl_cols]], axis=1)\r\n if isinstance(test, str) or test is None:\r\n pass\r\n else:\r\n test_copy_index = test_copy.index\r\n test1 = MGB.transform(test)\r\n addl_cols = left_subtract(test1.columns,test.columns)\r\n test1.index = test_copy_index\r\n test_copy = pd.concat([test_copy,test1[addl_cols]],axis=1)\r\n ### return the dataframes ###########\r\n return train_copy, test_copy", "def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", len(dallas_files)\n print \"\\t# of Houston weather files found: \", len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df", "def group_df(self):\n return self._group_df", "def do_tree(self, args, opts=None):\n global __groupcount\n global __datasetcount\n __groupcount = 0\n 
__datasetcount = 0\n\n def children(item):\n if isinstance(item, h5py.Dataset):\n return []\n else:\n return [i[1] for i in item.items()]\n\n def format(item):\n name = os.path.basename(item.name)\n if name == '':\n name = '/'\n if isinstance(item, h5py.Dataset):\n if opts.shape:\n name = name + ' ' + str(item.shape)\n global __datasetcount\n __datasetcount += 1\n elif isinstance(item, h5py.Group):\n global __groupcount\n __groupcount += 1\n return name\n\n if len(args) == 0:\n args.append('')\n group = self.explorer.group(args[0])\n tree_format.print_tree(group, format, children)\n print('{} groups, {} datasets'.format(__groupcount - 1, __datasetcount))" ]
[ "0.6541141", "0.60830426", "0.6062187", "0.59850407", "0.5894021", "0.5629577", "0.56196886", "0.5457541", "0.5436698", "0.5417699", "0.5383131", "0.53697276", "0.5362166", "0.5328199", "0.5324665", "0.5262586", "0.52602506", "0.5259905", "0.52521104", "0.5234652", "0.5222199", "0.5219996", "0.521683", "0.520833", "0.5199812", "0.51879185", "0.5141566", "0.51389223", "0.5132139", "0.5128927", "0.5114995", "0.50869745", "0.5078696", "0.50513613", "0.50509065", "0.50418", "0.504162", "0.5036773", "0.50238335", "0.5009658", "0.5002019", "0.49969888", "0.49926063", "0.49803746", "0.4967131", "0.4964224", "0.49622256", "0.4952214", "0.49506134", "0.49476537", "0.4946219", "0.49355116", "0.49263057", "0.49245054", "0.49188754", "0.4916321", "0.4912776", "0.49122804", "0.48953584", "0.48920983", "0.48891404", "0.48840472", "0.48807877", "0.48796597", "0.48773077", "0.48757508", "0.48549217", "0.4853006", "0.48510543", "0.48442996", "0.48442683", "0.48366868", "0.48242322", "0.48238146", "0.48171577", "0.4808731", "0.48055866", "0.48054555", "0.48027503", "0.4797591", "0.479627", "0.47946224", "0.47889426", "0.47884047", "0.47873774", "0.47818738", "0.47751787", "0.47736853", "0.47719955", "0.47644216", "0.47463384", "0.4739824", "0.4738569", "0.47368756", "0.4735363", "0.47323215", "0.47081062", "0.46974146", "0.46951765", "0.46840435" ]
0.64353156
1
Reweight JHU and Madgraph signals to different coupling scenarios.
def ac_reweighting(dataframes: dict, reweight: bool, config: dict) -> dict: vbf = pd.concat([df for key, df in dataframes.items() if 'vbf125_JHU' in key]) wh = pd.concat([df for key, df in dataframes.items() if 'wh125_JHU' in key]) zh = pd.concat([df for key, df in dataframes.items() if 'zh125_JHU' in key]) # scale evtwt with appropriate reweighting factor and give a new name for weight, name in config['jhu_ac_reweighting_map']['vbf']: df = vbf.copy(deep=True) df['evtwt'] *= df[weight] dataframes[name] = df for weight, name in config['jhu_ac_reweighting_map']['wh']: df = wh.copy(deep=True) df['evtwt'] *= df[weight] dataframes[name] = df for weight, name in config['jhu_ac_reweighting_map']['zh']: df = zh.copy(deep=True) df['evtwt'] *= df[weight] dataframes[name] = df if reweight: # add couplings together then apply weights ggh = pd.concat([df for key, df in dataframes.items() if 'ggh125_madgraph' in key]) for weight, name in config['mg_ac_reweighting_map']['ggh']: df = ggh.copy(deep=True) df['evtwt'] *= df[weight] dataframes[name] = df else: # just add couplings without weighting dataframes['reweighted_ggH_htt_0PM125'] = pd.concat([ df for key, df in dataframes.items() if 'ggh125_madgraph' in key and 'a1_filtered' in key ]) dataframes['reweighted_ggH_htt_0M125'] = pd.concat([ df for key, df in dataframes.items() if 'ggh125_madgraph' in key and 'a3_filtered' in key ]) dataframes['reweighted_ggH_htt_0Mf05ph0125'] = pd.concat([ df for key, df in dataframes.items() if 'ggh125_madgraph' in key and 'a3int_filtered' in key ]) return dataframes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def caculate_signals(self):\n\t\traise NotImplementedError(\"Should implement calculate_signals()\")", "def update_signals(self, event):\r\n\r\n if event.type == 'SIGNAL':\r\n order_event = self.generate_naive_order(event)\r\n self.events.put(order_event)", "def revise():", "def onShutdown(self, connection:MQTTConnection) -> None:", "def update_signal(self, event):\n if event.type == 'SIGNAL':\n order_event = self.generate_naive_order(event)\n self.events.put(order_event)", "def update_signal(self, event):\n if event.type == 'SIGNAL':\n order_event = self.generate_naive_order(event)\n self.events.put(order_event)", "def update_signal(self, event):\n if event.type == 'SIGNAL':\n order_event = self.generate_naive_order(event)\n self.events.put(order_event)", "def signal_vwap(self):\n pass", "def back(self, steps=1):\n raise NotImplementedError", "def signal(self):\n pass", "def sweep_relay():", "def reset_hessian_and_bias(self):\n # reset_shared_var(self.t_H)\n t = self.QUAD_REG\n if len(t.shape) == 1:\n self.t_H.set_value(np.diag(self.QUAD_REG))\n elif len(t.shape) == 2:\n self.t_H.set_value(self.QUAD_REG)\n else:\n raise ValueError('Invalid quad_reg shape')\n\n reset_shared_var(self.t_B)", "def mv_step(self):\n # def mv_all(self):\n self.device_reg_data &= ~(0x1 << 3)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def connectionLost(self, reason):\n\n WampServerProtocol.connectionLost(self, reason)\n\n for model in self.cpuModels:\n model.timer.stop()\n\n self.cpuModels = []", "def multipleSwitchBack(house, currentH, randomH, b, howMany):\n\n if currentH != \"NOT CONNECTED!\":\n switch(house, currentH)\n\n else:\n house.distance = 0\n house.connection.capacity += house.output\n house.connection.connectedHouses.remove(house)\n house.connection = \"NOT CONNECTED!\"\n\n for i in range(howMany):\n switch(randomH[i], b)", "def back_patch(self, *args, **kwargs):\n self.pb[self.ss_i(0)] = \"JPF\", _m(self.ss_i(1)), _m(self.pc)\n self.pop(2)", "def restore_signal_handlers(cls):\n signals = cls.__signal_handlers.keys()\n for sig in signals:\n try:\n signal.signal(sig, cls.__signal_handlers[sig])\n except Exception as e:\n pass\n cls.__signal_handlers = {}", "def _repackage(self):\n self.hidden = _var(self.hidden.data)\n if self.G.ntype == \"lstm\":\n self.state = _var(self.state.data)", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def backward(self):\n raise NotImplementedError", "def clearsignals(self):\n self._panic_mode = \"deactivated\"\n self._armstate = STATE_ALARM_DISARMED\n self.immediate = set()\n self.delayed = set()\n self.ignored = self._allsensors.copy()\n self._timeoutat = None", "def _data_move_out_mc_on_h():\n\n pass", "def handle_warm_resets():\n\n # If we're in USB reset, we're actively receiving warm reset signaling; and we should reset\n # to the Rx.Detect.Reset state.\n with m.If(self.in_usb_reset):\n transition_to_state(\"Rx.Detect.Reset\")", "def jtag_enter_test_logic_reset(self):\n self._probe.swj_sequence(8, 0xff)", "def pibooth_reset(cfg, hard):", "def handle_switch_give_up(self, message):\n # TODO: ADD timer and close\n if 
message.type == 11 and not self.giving_up:\n #print \"giving up control\"\n ########## Evaluation ######\n print \"Switch migrated: \", self.last_xid\n ########################\n if self.buffer:\n self.handle_write()\n if self.controller.buffer:\n self.controller.handle_write()\n self.giving_up = True\n # Keep processing replies, they are not sent to other controllers.\n if message.type in [3, 6, 8, 19, 21, 23, 25]:\n self.controller.buffer.append(message)", "def turnOffMotors(self) -> None:\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)", "def turnOffMotors(self):\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)", "def mv_all(self):\n # def mv_step(self):\n self.device_reg_data &= ~(0x1 << 2)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def REBpowerup(self):\n #specific to REB1\n self.cabac_reset()\n\n self.load_sequencer()\n #sets the default sequencer clock states to 0\n self.fpga.send_function(0, fpga0.Function( name=\"default state\", timelengths={0: 2, 1: 0}, outputs={0: 0, 1: 0}))\n\n print(\"REB ready to connect to CCD\")", "def disarm(self):\n pass", "def solve_buses(prepared_buses):\n T, c = functools.reduce(combine_signals, prepared_buses)\n return T - c", "def revert(self, *args, **kwargs):", "def post_backward_discriminator(self):\n pass", "def calculate_signals(self, event: MarketEvent):\n for symbol, bars in event.symbol_data.items():\n if not self.bought[symbol]:\n signal = SignalEvent(bars[-1].symbol, bars[-1].time, 'LONG')\n self.events.add_event(signal)\n self.bought[symbol] = True", "def pswitchoff(chan) :\n s.phaseSwitching(False, chan)", "def end_switch(self, *args, **kwargs):\n self.pb[self.ss_i(1)] = \"JP\", _m(self.pc)\n self.pop(2)\n self.fill_breaks(*args, **kwargs)", "def convert_back(self):\n pass", "def lost_connection(self):\n\t\tfor light in OUTPUT.UP_LIGHTS + OUTPUT.DOWN_LIGHTS:\n\t\t\tif light != -1:\n\t\t\t\tio.set_bit(light, 0)\n\t\tif self.orderQueue.has_orders():\n\t\t\tself.orderQueue.delete_all_orders(exclude=ORDERDIR.IN)", "def _restore(self, graph):\n raise NotImplementedError()", "def signal(self, args):\n pass", "def tearDown(self):\n self.event_out.unregister()\n self.pub_current_joints.unregister()\n self.component_output.unregister()", "def backward(self):\n self.units = self._units_history.pop()\n self._backward()\n # We must set the utop to previous state immediately, because the utop could be other gate's input unit\n # And other gate's backward could be called before this gate's backward\n self._utop_history.pop()\n if self._utop_history:\n self.utop = self._utop_history[-1]", "def turn_off_motors():\n MOTOR_HAT.release_motors()", "def __init__(self, trap=2.5*10**16, Keq=1.0*10**17,\n EHdecay=1.0*10**-10, Etrap=2.0*10**-10, FHloss=8.0*10**-12,\n G3decay = 0, step=200*ps, pretime=2, reprate=80000000,\n verbose=False, trackQ=False, scalar=1, Gdecay=0, GHdecay=0,\n tolerance=0.005, G2decay=0. 
,Gescape=1., Gform=1., G3loss=0.):\n # Some other variables used\n self.tolerance = tolerance\n self.scalar = scalar\n self.verbose = verbose\n self.reprate = reprate\n self.duration = 1.00 / reprate\n self.step = step\n self.steps = int(self.duration / self.step)\n self.powers = []\n self.pretime = pretime\n # Variables which hold state densities\n self.exciton = []\n self.hole = []\n self.electron = []\n self.trap = (trap) # Total number of traps\n self.filled = [] # Filled traps\n self.signal = []\n self.xsignal = []\n self.ehsignal = []\n self.xloss = []\n self.tloss = []\n self.pulses = []\n self.qk = []\n self.trackQ = trackQ\n # Rate and equilibrium constants, corrected for time step size\n self.Keq = Gescape/Gform # Equilibrium constant for X<-->e+h\n self.EHdecay = (EHdecay * step) # e+h->ground\n self.Etrap = (Etrap * step) # e+trap->filled\n self.FHloss = (FHloss * step) # filled+h->ground\n self.Gdecay = Gdecay * step\n self.G2decay = G2decay * step\n self.G3decay = G3decay * step\n self.GHdecay = GHdecay * step\n self.Gescape = Gescape * step\n self.G3loss = G3loss * step\n self.Gform = Gform * step", "def backpropagating(self): \n\n ######################### Configure the sensor inputs given the movement of the agent ######################### \n sensors_result_N = self.agent.sensors(self, direction=3) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(3)+[int(self.agent.get_previous_collision())]\n sensors_result_O = self.agent.sensors(self, direction=2) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(2) + [int(self.agent.get_previous_collision())]\n sensors_result_S = self.agent.sensors(self, direction=1) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(1) + [int(self.agent.get_previous_collision())]\n sensors_result_E = self.agent.sensors(self, direction=0) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(0) + [int(self.agent.get_previous_collision())]\n\n input_nn_N = np.asarray(sensors_result_N).astype(int) # input when the Nord action is performed \n input_nn_O = np.asarray(sensors_result_O).astype(int) # input when the West action is performed\n input_nn_S = np.asarray(sensors_result_S).astype(int) # input when the South action is performed\n input_nn_E = np.asarray(sensors_result_E).astype(int) # input when the West action is performed\n\n l_input = [input_nn_E.reshape(1,145),input_nn_S.reshape(1,145),input_nn_O.reshape(1,145),input_nn_N.reshape(1,145)]\n ######################### Configure the sensor inputs given the movement of the agent #########################\n\n print(\"The reward in baskpropagating is %f\" %(self.agent.reward) ) \n parameters = [self.gamma, self.agent.reward]\n Ui = self.U_list[self.agent.get_previousAction().index(1)]\n\n if not self.end:\n U_list_y = [self.nn.predict(input_nn_E.reshape(1,145)),\\\n self.nn.predict(input_nn_S.reshape(1,145)),\\\n self.nn.predict(input_nn_O.reshape(1,145)),\\\n self.nn.predict(input_nn_N.reshape(1,145))] \n #print(U_list_y)\n maxU = np.max(U_list_y)\n #print(np.max(U_list_y))\n index_input_maxU = np.argmax(U_list_y) # the input given for the backprogating is the one with the maximum utility\n input_target = l_input[index_input_maxU] # The input target with the max utility, add to the tuple given during the experience replay\n uprime = self.agent.reward + self.gamma * maxU # input of the utility with the best value\n \n else:\n uprime = self.agent.reward\n input_target = np.array(None)\n \n action = 
self.agent.get_previousAction().index(1)\n input_nn = self.input_list[action]\n ##### Add to the lesson the action chose in order to go the next state, \n ##### the next state after to have performed the action, and the reward given\n if(self.action_proba[action] > 0.01): # the Pl minimum to choose the action corresponding to the action policy, cf to the paper part experience replay\n #next_states = [copy.deepcopy(input_nn_E).reshape(1,145), copy.deepcopy(input_nn_S).reshape(1,145), copy.deepcopy(input_nn_O).reshape(1,145), copy.deepcopy(input_nn_N).reshape(1,145)]\n self.memory.append((input_nn,action,np.asarray(copy.deepcopy(l_input)),self.agent.reward)) # We add the experiment to the memory of the agent \n \n ############################\n self.nn.train_one_step_other(input_nn,uprime)\n #self.nn.train(input_nn,tf.convert_to_tensor([[uprime]])) # use the method fit to train the neural network", "def signal_handler(*args):\n if station:\n station.shutdown()", "def setup(self):\n\t\tif self.hasSignalModule and not self.signalsRegistered:\n\t\t\t# Jython does not support all signals, so we only use\n\t\t\t# the available ones\n\t\t\tsignals = ['SIGINT', 'SIGHUP', 'SIGABRT', 'SIGQUIT', 'SIGTERM']\n\t\t\timport signal\n\t\t\tfor sig in signals:\n\t\t\t\ttry:\n\t\t\t\t\tsignal.signal(getattr(signal, sig), self._shutdown)\n\t\t\t\t\tself.signalsRegistered.append(sig)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tLogger.Err(\"[!] monitoring.Signals._registerSignals:%s %s\\n\" % (sig, e))", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass", "def dummyF(self, arg, signal, sender):\n \n if self.isDead == True:\n return\n \n \n #print \"DUMMY\"\n if self.aborting:\n self.aborting = False\n self.isActive = False\n self.__orderQueue.popleft()\n del self.slaveTalk\n self.slaveTalk = None\n# self.__orderQueue.popleft()\n try:\n self.releaseLock()\n except:\n pass\n #print \"No lock, actually.\" \n dispatcher.send('SLAVE_PARSE_QUEUE', self, '')", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def _unsubscribe(self, signal):\n while signal in self._downstream:\n 
self._downstream.remove(signal)\n while signal in self._downstream_reconnect:\n self._downstream_reconnect.remove(signal)", "def reset_desired_frames(self):\n self.kin.frames = self.root\n self.kin.active_joint_names = self.get_actuated_joint_names()", "def connect_backwards(self):\n\n for n in self.nodes:\n n.receives_from = []\n\n for n1 in self.nodes:\n for n2 in n1.sends_to:\n n2.receives_from.append(n1)", "def signal(self, emission, signal, source):\n logger.info(\"tele2.py:signal() emmision: %s, signal: %s, source: %s\", \n str(emission), str(signal), str(source))\n self.window.emit('back')", "def p_actionDecoder(params, substep, _3, s):\n uniswap_events = params['uniswap_events']\n\n \n prev_timestep = s['timestep']\n if substep > 1:\n prev_timestep -= 1\n \n # skip the first two events, as they are already accounted for \n # in the initial conditions of the system\n if params[\"backtest_mode\"]:\n t = prev_timestep + 1 \n else:\n t = prev_timestep\n \n action = {\n 'eth_sold': 0,\n 'tokens_sold': 0,\n 'eth_deposit': 0,\n 'token_deposit': 0,\n 'UNI_burn': 0, \n 'UNI_pct': 0,\n 'fee': 0,\n 'conv_tol': 0,\n 'price_ratio': 0\n }\n\n #Event variables\n if params[\"backtest_mode\"]:\n event = uniswap_events.iloc[t]['event']\n action['action_id'] = event\n else:\n #signal = params['extrapolated_signals'][t]['ratio']\n #I_t, O_t, I_t1, O_t1, delta_I, delta_O, action_key = agent_action(signal, s)\n I_t, O_t, I_t1, O_t1, delta_I, delta_O, action_key = s['Action']['I_t'], s['Action']['O_t'], s['Action']['I_t1'], s['Action']['O_t1'], s['Action']['delta_I'], s['Action']['delta_O'], s['Action']['action_key']\n if action_key == \"eth_sold\":\n event = 'tokenPurchase'\n else:\n event = 'ethPurchase'\n action['action_id'] = event\n\n\n # Swap Event\n if event in ['tokenPurchase', 'ethPurchase']:\n # action_key is either `eth_sold` or `token_sold`\n if params[\"backtest_mode\"]:\n I_t, O_t, I_t1, O_t1, delta_I, delta_O, action_key = get_parameters(uniswap_events, event, s, t)\n\n # Classify actions based on trading heuristics\n # N/A case\n if params['retail_precision'] == -1:\n action[action_key] = delta_I\n # Convenience trader case\n elif classifier(delta_I, delta_O, params['retail_precision']) == \"Conv\":\n calculated_delta_O = int(get_output_amount(delta_I, I_t, O_t, params))\n if calculated_delta_O >= delta_O * (1-params['retail_tolerance']):\n action[action_key] = delta_I\n else:\n action[action_key] = 0\n #action['price_ratio'] = delta_O / calculated_delta_O\n # Arbitrary trader case\n else: \n P = I_t1 / O_t1\n actual_P = I_t / O_t\n if(actual_P > P):\n if params[\"backtest_mode\"]:\n I_t, O_t, I_t1, O_t1, delta_I, delta_O, action_key = get_parameters(uniswap_events, reverse_event(event), s, t)\n P = I_t1 / O_t1\n actual_P = I_t / O_t\n delta_I = get_delta_I(P, I_t, O_t, params)\n delta_O = get_output_amount(delta_I, I_t, O_t, params)\n if(unprofitable_transaction(I_t, O_t, delta_I, delta_O, action_key, params)):\n delta_I = 0\n action[action_key] = delta_I\n else:\n delta_I = get_delta_I(P, I_t, O_t, params)\n delta_O = get_output_amount(delta_I, I_t, O_t, params)\n if(unprofitable_transaction(I_t, O_t, delta_I, delta_O, action_key, params)):\n delta_I = 0\n action[action_key] = delta_I\n \n \"\"\"for key in ['eth_sold', 'tokens_sold', 'eth_deposit', 'token_deposit', 'price_ratio']:\n if action[key] != 0:\n print(key)\n print(action[key])\"\"\"\n #print(sum(action.values()))\n elif event == 'mint':\n delta_I = uniswap_events['eth_delta'][t]\n delta_O = uniswap_events['token_delta'][t]\n 
UNI_delta = uniswap_events['UNI_delta'][t]\n UNI_supply = uniswap_events['UNI_supply'][t-1]\n\n action['eth_deposit'] = delta_I\n action['token_deposit'] = delta_O\n action['UNI_mint'] = UNI_delta\n action['UNI_pct'] = UNI_delta / UNI_supply\n elif event == 'burn':\n delta_I = uniswap_events['eth_delta'][t]\n delta_O = uniswap_events['token_delta'][t]\n UNI_delta = uniswap_events['UNI_delta'][t]\n UNI_supply = uniswap_events['UNI_supply'][t-1]\n if UNI_delta < 0:\n action['eth_burn'] = delta_I\n action['token_burn'] = delta_O\n action['UNI_burn'] = UNI_delta\n action['UNI_pct'] = UNI_delta / UNI_supply\n del uniswap_events\n return action", "def off(self):", "def signal_oi(self):\n pass", "def transact(self):", "def transact(self):", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def offload(self):\n self.pulley(\"down\")\n time.sleep(0.97)\n self.pulley(\"stop\")\n self.pusher_activate()\n self.pulley(\"up\")\n time.sleep(1.1)\n self.pulley_activate()", "def __del__(self):\n self.DcMotor.run(Adafruit_MotorHAT.RELEASE) # changed rightMotor to DcMotor , RFMH_2019_02_28\n del self.motorhat", "def backward(self, g, lx):\n if isinstance(g, sequence._Seq):\n self._Matr__c_elem().backward(g._Seq__c_elem(),lx._Lexique__c_elem())\n elif isinstance(g, _Matr):\n self._Matr__c_elem().backward(g._Matr__c_elem(),lx._Lexique__c_elem())\n self._Matr__maj()", "def backward(self, *output_grads):\n raise NotImplementedError", "def _handleUpstreamUnready(self, slot):\n if self.meta._ready:\n self.meta._ready = False\n self._sig_unready(self)", "def disconnect(self):\n for signal, models in six.iteritems(self._registry):\n for model, keys in six.iteritems(models):\n signal.disconnect(sender=model, weak=False, dispatch_uid=signal)\n self._registry = {}", "def up(self, event):\n event.widget.unbind (\"<B1-Motion>\")\n event.widget.unbind (\"<ButtonRelease-1>\")\n self.diag.update_arrows()", "def unregister_signals(self):\n for _, callback in self.signal_callbacks:\n Signal.unsubscribe(self, callback)", "def __exit__(self, *args: Any):\n\n set_spow_enable(self._previous_state)", "def handler(signum, frame):\n m.signal()", "def caculate_signals(self, event):\n\t\tif event.type == 'MARKET':\n\t\t\tfor s in self.symbol_list:\n\t\t\t\tbars = self.bars.get_latest_bars(s, N=1)\n\t\t\t\tif bars is not None and bars != []:\n\t\t\t\t\tif self.bought[s] == False:\n\t\t\t\t\t\t# (Symbol, Datetime, Type = LONG, SHORT or EXIT)\n\t\t\t\t\t\tsignal = SignalEvent(bars[0][0], bars[0][1], 'LONG')\n\t\t\t\t\t\tself.events.put(signal)\n\t\t\t\t\t\tself.bought[s] = False", "def revert(self, fgraph, checkpoint):\r\n h = self.history[fgraph]\r\n self.history[fgraph] = None\r\n while len(h) > checkpoint:\r\n f = h.pop()\r\n f()\r\n self.history[fgraph] = h", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def clear_state():\n for name, signal in _signals.items():\n signal._clear_state()", "def L_model_backward(AL, Y, caches):\n pass", "def end_and_close(self):\n self.sim._model.swmm_end()\n self.sim._model.swmm_close()\n pass", "async def test_signal_repetitions_alternation(hass: HomeAssistant, monkeypatch) -> None:\n config = {\n \"rflink\": {\"port\": \"/dev/ttyABC0\"},\n DOMAIN: {\n \"platform\": \"rflink\",\n \"devices\": {\n \"protocol_0_0\": {\"name\": \"test\", \"signal_repetitions\": 2},\n \"protocol_0_1\": {\"name\": \"test1\", \"signal_repetitions\": 2},\n },\n },\n }\n\n # setup 
mocking rflink module\n _, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)\n\n await hass.services.async_call(\n DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.test\"}\n )\n await hass.services.async_call(\n DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.test1\"}\n )\n\n await hass.async_block_till_done()\n\n assert protocol.send_command_ack.call_args_list[0][0][0] == \"protocol_0_0\"\n assert protocol.send_command_ack.call_args_list[1][0][0] == \"protocol_0_1\"\n assert protocol.send_command_ack.call_args_list[2][0][0] == \"protocol_0_0\"\n assert protocol.send_command_ack.call_args_list[3][0][0] == \"protocol_0_1\"", "def restore(self):\n self.igate.restore()\n self.fgate.restore()\n self.ogate.restore()\n super(LSTM, self).restore()", "def post_process_trap(): \n #################### 0) assign internal values #################### \n from project_parameters import trapType,debug,trapFile,name,driveAmplitude,driveFrequency,Omega,dcplot,weightElectrodes,coefs,ax,az,phi,save,scale\n #from all_functions import find_saddle,plot_potential,dc_potential,set_voltages,exact_saddle,spher_harm_bas,spher_harm_exp,pfit,plotN\n import pickle\n\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n\n qe = trap.configuration.charge\n mass = trap.configuration.mass\n Zval = trap.configuration.position\n r0 = trap.configuration.r0\n RFampl = driveAmplitude \n V0 = mass*(2*np.pi*Omega)**2*(r0*10**-3)**2/qe\n X,Y,Z=trap.instance.X,trap.instance.Y,trap.instance.Z \n data = trap.configuration\n dcVoltages = set_voltages()\n ne = len(weightElectrodes)\n E = trap.instance.E\n out = trap.configuration\n if debug.post_process_trap:\n print dcVoltages,np.max(dcVoltages)#np.sum(abs(dcVoltages))\n plotN(dcVoltages,trap,'set DC voltages') \n Vdc = dc_potential(trap,dcVoltages,E)\n #[IDC,JDC,KDC] = find_saddle(Vdc,X,Y,Z,3,Zval) \n #[XDC,YDC,ZDC] = exact_saddle(Vdc,X,Y,Z,3,Zval)\n #XDC,YDC,ZDC = X[IDC],150/scale,Z[KDC]\n #print XDC,YDC,ZDC,IDC,JDC,KDC\n #dcbasis,dcscale= spher_harm_bas(XDC,YDC,ZDC,X,Y,Z,4)\n #QQ = spher_harm_exp(Vdc,dcbasis,dcscale) \n #print QQ[0:9].T\n #1) RF Analysis\n print('RF Analysis') \n Vrf = RFampl*data.EL_RF\n [Irf,Jrf,Krf] = find_saddle(Vrf,X,Y,Z,2,Zval)\n if debug.post_process_trap:\n plot_potential(Vrf,X,Y,Z,dcplot,'weighted RF potential','V_{rf} (eV)',[Irf,Jrf,Krf])\n #2) DC Analysis\n print('DC Analysis')\n trap = dc_potential(trap,dcVoltages,E,update=None)\n Vdc = trap.instance.DC\n [Idc,Jdc,Kdc] = find_saddle(Vdc,X,Y,Z,3,Zval) # only used to calculate error at end\n if debug.post_process_trap:\n plot_potential(Vdc,X,Y,Z,'1D plots','full DC potential')\n #3) determine the exact saddles of the RF and DC\n trap = dc_potential(trap,dcVoltages,E)\n Vdc = trap.instance.DC\n print('Determining exact RF saddle...')\n [Xrf,Yrf,Zrf] = exact_saddle(Vrf,X,Y,Z,2,Zval) \n print('Determining exact DC saddle...')\n [Xdc,Ydc,Zdc] = exact_saddle(Vdc,X,Y,Z,3,Zval)\n #4) determine stray field (beginning of justAnalyzeTrap)\n print('Determining compensation due to E field...')\n nx,ny,nz=X.shape[0],Y.shape[0],Z.shape[0]\n x,y,z = np.zeros((nx,ny,nz)),np.zeros((nx,ny,nz)),np.zeros((nx,ny,nz))\n for i in range(nx):\n for j in range(ny):\n for k in range(nz):\n x[i,j,k] = X[i]\n y[i,j,k] = Y[j]\n z[i,j,k] = Z[k]\n VlessE = Vdc-E[0]*x-E[1]*y-E[2]*z\n [Xdc,Ydc,Zdc] = exact_saddle(VlessE,X,Y,Z,3) \n dist = np.sqrt((Xrf-Xdc)**2+(Yrf-Ydc)**2+(Zrf-Zdc)**2) \n #5) call pfit to built teh total field and determine the trap characteristics\n 
[fx,fy,fz,theta,Depth,Xe,Ye,Ze] = pfit(Vrf,Vdc,X,Y,Z,Irf,Jrf,Krf)#pfit(trap,E,Freq,RFampl)\n print('Stray field is ({0},{1},{2}) V/m.'.format(scale*E[0],scale*E[1],scale*E[2]))\n print('With this field, the compensation is optimized to {} micron.'.format(scale*dist))\n print('RF saddle: ({0},{1},{2})\\nDC saddle ({3},{4},{5}).'.format(Xrf,Yrf,Zrf,Xdc,Ydc,Zdc)) \n if debug.trap_depth:\n print('The trap escape position is at ({0},{1},{2}) microns, for a trap depth of {3} mV'.format(Xe*scale,Ye*scale,Ze*scale,Depth*scale))\n print('The trap frequencies are fx = {0} MHz, fy = {1} MHz, and fz = {2} MHz'.format(fx*10**-6,fy*10**-6,fz*10**-6))\n #6) Sanity testing; quality check no longer used\n if debug.post_process_trap:\n rfbasis,rfscale= spher_harm_bas(Xrf,Yrf,Zrf,X,Y,Z,2)\n Qrf = spher_harm_exp(Vrf,rfbasis,rfscale) \n if np.sqrt((Xrf-Xdc)**2+(Yrf-Ydc)**2+(Zrf-Zdc)**2)>0.008: \n print('Expanding DC with RF for saniy checking.')\n Qdc = spher_harm_exp(Vdc,rfbasis,rfscale) \n else:\n print('Expanding DC without RF for sanity checking.')\n dcbasis,dcscale= spher_harm_bas(Xdc,Ydc,Zdc,X,Y,Z,2)\n Qdc = spher_harm_exp(Vdc,dcbasis,dcscale) \n Arf = 2*np.sqrt( (3*Qrf[7])**2+(3*Qrf[8])**2 )\n Thetarf = 45*(Qrf[8]/abs(Qrf[8]))-90*np.arctan((3*Qrf[7])/(3*Qrf[8]))/np.pi\n Adc = 2*np.sqrt( (3*Qdc[7])**2+(3*Qdc[8])**2 )\n Thetadc = 45*(Qrf[8]/abs(Qrf[8]))-90*np.arctan((3*Qdc[7])/(3*Qdc[8]))/np.pi\n out.E = E\n out.miscompensation = dist\n out.ionpos = [Xrf,Yrf,Zdc]\n out.ionposIndex = [Irf,Jrf,Krf]\n out.frequency = [fx,fy,fz]\n out.theta = theta\n out.trap_depth = Depth/qe \n out.escapepos = [Xe,Ye,Ze]\n out.Quadrf = 2*np.array([Qrf[7]*3,Qrf[4]/2,Qrf[8]*6,-Qrf[6]*3,-Qrf[5]*3])\n out.Quaddc = 2*np.array([Qdc[7]*3,Qdc[4]/2,Qdc[8]*6,-Qdc[6]*3,-Qdc[5]*3])\n out.Arf = Arf\n out.Thetarf = Thetarf\n out.Adc = Adc\n out.Thetadc = Thetadc\n T = np.array([[2,-2,0,0,0],[-2,-2,0,0,0],[0,4,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0, 0,0,0,1]])\n Qdrf = out.Quadrf.T\n Qddc = out.Quaddc.T\n out.q = (1/V0)*T*Qdrf\n out.alpha = (2/V0)*T*Qddc\n out.Error = [X[Idc]-Xdc,Y[Jdc]-Ydc,Z[Kdc]-Zdc]\n #7) update the trapping field data structure with instance attributes\n trap.configuration=out\n trap.instance.driveAmplitude = driveAmplitude\n trap.instance.driveFrequency = driveFrequency\n trap.instance.coefs = coefs\n trap.instance.ax = ax\n trap.instance.az = az\n trap.instance.phi = phi\n trap.instance.ppt = True\n trap.instance.out = out\n if save==True:\n print('Saving '+trapFile+' as a data structure...')\n with open(trapFile,'wb') as f:\n pickle.dump(trap,f)\n return 'post_proccess_trap complete' #out # no output needed really", "def fixup(self):\n raise Exception(\"Fixup not implemented yet!\")" ]
[ "0.5252793", "0.5252793", "0.52396435", "0.51686776", "0.50035083", "0.49548638", "0.49380127", "0.49380127", "0.49380127", "0.49363765", "0.4932626", "0.49048656", "0.4897503", "0.48733047", "0.48726597", "0.48689917", "0.48130462", "0.47945935", "0.47870296", "0.47869524", "0.47714463", "0.4759333", "0.47581527", "0.47552687", "0.47478953", "0.47312957", "0.47220507", "0.47202843", "0.47166568", "0.47118482", "0.47090745", "0.4700487", "0.4699122", "0.46861404", "0.46787995", "0.4675674", "0.46635443", "0.46607634", "0.46589172", "0.46544942", "0.46476373", "0.46293223", "0.4625616", "0.46215817", "0.46190423", "0.46070406", "0.46040142", "0.4596312", "0.45956928", "0.45873174", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45845574", "0.45840755", "0.4579008", "0.4579008", "0.4579008", "0.45782018", "0.4577858", "0.45766836", "0.45739147", "0.4573439", "0.45700008", "0.45676056", "0.45668215", "0.45668215", "0.45632562", "0.45570022", "0.4554558", "0.45535654", "0.45484564", "0.45362005", "0.45347214", "0.4534459", "0.45315278", "0.45220408", "0.45143995", "0.45139697", "0.45116454", "0.4511636", "0.45108253", "0.45095354", "0.450922", "0.4507147", "0.45059896", "0.45033816", "0.44989115" ]
0.0
-1
Store information for NN training (signal labels, scaled evtwts, and info for standardization).
def nn_preprocess(dataframes: dict) -> dict: from sklearn.preprocessing import MinMaxScaler for name, df in dataframes.items(): if name == 'metadata': continue # store signal label for NN training if 'ggh125' in name or 'vbf125' in name: df['signalLabel'] = np.ones(len(df)) else: df['signalLabel'] = np.zeros(len(df)) # normalize sample weights df['scaled_evtwt'] = MinMaxScaler(feature_range=(1., 2.)).fit_transform(df.evtwt.values.reshape(-1, 1)) return dataframes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def training_info(self):\n pass", "def _training_vars(self):\n self.vars = dict()\n # Temperature params\n self.vars['TInit'] = -1\n self.vars['TDecayRate'] = 0.05\n # Bowl params\n self.vars['q_init'] = 16.58 # initial strength for the bowl\n self.vars['q_max'] = 150.\n #self.vars['q_rate'] = 10.\n # Check if we can improve learning, adjusting this value\n self.vars['bowl_center'] = 0.4\n self.vars['bowl_strength'] = None\n self.vars['beta_min_offset'] = 2\n # Time step params\n self.vars['max_dt'] = 0.01\n self.vars['min_dt'] = 0.0005\n self.vars['dt'] = 0.009\n # Training traces\n self.vars['prev_s'] = None\n self.vars['Harmony_trace'] = None\n self.vars['speed_trace'] = None\n self.vars['ema_trace'] = None\n self.vars['lambda_trace'] = None\n self.vars['temp_trace'] = None\n self.vars['TP_trace'] = None\n self.vars['TPnum_trace'] = None\n self.vars['TP_h_trace'] = None\n self.vars['TP_dist_trace'] = None\n self.vars['S_trace'] = None\n\n if self.custom_settings is not None:\n for key, value in self.custom_settings.items():\n if key in self.vars:\n self.vars[key] = value", "def train_data(self):\n\n return self.__train_data, self.__train_labels", "def _training__(self):\n self.input_size, self.output_size = self.X_train.shape[1], self.y_train.shape[1]\n w1 = np.random.uniform(size=[self.input_size, self.hidden_size])\n b = np.random.uniform(size=[1, self.hidden_size])\n H = self._activation__(np.add(np.matmul(self.X_train, w1), b))\n w2 = np.dot(np.linalg.pinv(H), self.y_train)\n self.model = {\"w1\": w1, \"b\": b, \"w2\": w2}", "def trainNet():", "def create_training(logits):\r\n \r\n\r\n return train_op, loss, label_ph", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def on_train_begin(self, logs={}):\n self.losses = []\n self.accuracies = []", "def _train(self):\n self.train_acc.reset_states()\n self.val_acc.reset_states()\n self.train_loss.reset_states()\n self.val_loss.reset_states()\n\n self.train_ds.shuffle(buffer_size=1000)\n for idx, (x,y) in enumerate(self.train_ds):\n self.tf_train_step(x, y)\n\n for x,y in self.val_ds:\n self.tf_val_step(x, y)\n\n # It is important to return tf.Tensors as numpy objects.\n return {\n \"epoch\": self.iteration,\n \"loss_train\": self.train_loss.result().numpy(),\n \"loss_val\": self.val_loss.result().numpy(),\n 
\"acc_train\": self.train_acc.result().numpy(),\n \"acc_val\": self.val_acc.result().numpy(),\n }", "def __init__(self):\n #self.NN = Neural_Network()\n y_vals = pd.read_csv('training_data_y.csv')\n x_vals_original = pd.read_csv('training_data_x.csv')\n x_vals_original.columns = ['R1', 'G1', 'B1', 'W1', 'R2', 'G2', 'B2', 'W2', 'R3', 'G3', 'B3', 'W3']\n total_x_train = self.getNewDF_X(x_vals_original)\n total_y_train = self.getNewDF_Y(y_vals) \n #training data is numpy arrays here\n x_arr = np.asarray(total_x_train,dtype=np.float32)\n y_train = np.asarray(total_y_train,dtype=np.float32)\n #convert training data to tensors and scale it\n x_train = torch.tensor((x_arr), dtype=torch.float)\n self.x_train = self.scaleInputTestData(x_train)\n self.y_train = torch.tensor((y_train), dtype=torch.float) / 100", "def getTrainingData(self):\n raise NotImplementedError", "def learn(self):\n epochswin = [] # count the number of wins at every epoch of the network against the preceding version\n epochdraw = [] # count the number of draws at every epoch of the network against the preceding version\n epochswingreedy = [] # count the number of wins against greedy at every epoch\n epochswinrandom = [] # count the number of wins against random at every epoch\n epochsdrawgreedy = [] # count the number of draws against greedy at every epoch\n epochsdrawrandom = [] # count the number of wins against random at every epoch\n epochswinminmax = [] # count the number of wins against minmax at every epoch\n epochsdrawminmax = [] # count the number of draws against minmax at every epoch\n\n\n if self.args.load_model == True:\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \".txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswin.append(word)\n elif index == 1:\n epochdraw.append(word)\n file.close()\n\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \":greedyrandom.txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswingreedy.append(word)\n elif index == 1:\n epochsdrawgreedy.append(word)\n elif index == 2:\n epochswinrandom.append(word)\n elif index == 3:\n epochsdrawrandom.append(word)\n elif index == 4:\n epochswinminmax.append(word)\n elif index == 5:\n epochsdrawminmax.append(word)\n file.close()\n self.loadTrainExamples()\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=self.args.numEps)\n end = time.time()\n\n for eps in range(self.args.numEps):\n iterationTrainExamples += self.executeEpisode()\n\n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps + 1,\n maxeps=self.args.numEps,\n et=eps_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n 
print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory),\n \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1)\n self.saveTrainExamples(i - 1)\n\n # shuffle examlpes before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n exists = os.path.isfile(filenameBest)\n if exists:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game,nmcts,pmcts,evaluate=True)\n\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare, False)\n\n pmcts.clear()\n nmcts.clear()\n del pmcts\n del nmcts\n\n print(' ')\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if i == 1:\n epochswin.append(pwins)\n epochdraw.append(0)\n\n epochswin.append(nwins)\n epochdraw.append(draws)\n self.writeLogsToFile(epochswin, epochdraw)\n\n ''' Get all the players and then pit them against the network. 
You need to modify here if you implement \n more players\n '''\n (gp, rp, mp) = self.decidePlayers()\n\n if self.args.parallel == 0:\n\n\n nmcts1 = MCTS(self.game, self.nnet, self.args)\n nmcts2 = MCTS(self.game, self.nnet, self.args)\n nmcts3 = MCTS(self.game, self.nnet, self.args)\n\n arenagreedy = Arena(lambda x: np.argmax(nmcts1.getActionProb(x, temp=0)), gp, self.game,nmcts1)\n arenarandom = Arena(lambda x: np.argmax(nmcts2.getActionProb(x, temp=0)), rp, self.game,nmcts2)\n arenaminmax = Arena(lambda x: np.argmax(nmcts3.getActionProb(x, temp=0)), mp, self.game,nmcts3,evaluate=True)\n\n pwinsminmax, nwinsminmax, drawsminmax = arenaminmax.playGames(self.args.arenaCompare)\n print(\"minmax - \"+str(pwinsminmax)+\" \"+str(nwinsminmax)+\" \"+str(drawsminmax))\n pwinsgreedy, nwinsgreedy, drawsgreedy = arenagreedy.playGames(self.args.arenaCompare)\n print(\"greedy - \"+str(pwinsgreedy)+\" \"+str(nwinsgreedy)+\" \"+str(drawsgreedy))\n pwinsreandom, nwinsrandom, drawsrandom = arenarandom.playGames(self.args.arenaCompare)\n print(\"random - \"+str(pwinsreandom)+\" \"+str(nwinsrandom)+\" \"+str(drawsrandom))\n\n nmcts1.clear()\n nmcts2.clear()\n nmcts3.clear()\n del nmcts1\n del nmcts2\n del nmcts3\n\n else:\n '''\n This will be used if you want to evaluate the network against the benchmarks in a parallel way\n '''\n\n self.args.update({'index': str(i)})\n\n p = self.parallel(self.args.arenaCompare)\n (pwinsminmax, nwinsminmax, drawsminmax) = p[0] # self.parallel(\"minmax\", self.args.arenaCompare)\n (pwinsgreedy, nwinsgreedy, drawsgreedy) = p[1] # self.parallel(\"greedy\",self.args.arenaCompare)\n (pwinsreandom, nwinsrandom, drawsrandom) = p[2] # self.parallel(\"random\",self.args.arenaCompare)\n\n epochsdrawgreedy.append(drawsgreedy)\n epochsdrawrandom.append(drawsrandom)\n epochswinrandom.append(pwinsreandom)\n epochswingreedy.append(pwinsgreedy)\n epochswinminmax.append(pwinsminmax)\n epochsdrawminmax.append(drawsminmax)\n\n self.writeLogsToFile(epochswingreedy, epochsdrawgreedy, epochswinrandom, epochsdrawrandom, epochswinminmax,\n epochsdrawminmax, training=False)\n\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) <= self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n exists = os.path.isfile(filenameBest)\n if exists:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n\n else:\n print('ACCEPTING NEW MODEL')\n\n filename = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n self.mcts.clear()\n del self.mcts\n self.mcts = MCTS(self.game, self.nnet, self.args, mcts=True) # reset search tree\n print(self.tracker.print_diff())\n self.writeLogsToFile(epochswin, epochdraw, training=True)", "def get_state(self):\n return {\n \"epoch\": self.epoch,\n \"weights\": self.model.get_weights(),\n \"optimizer_weights\": self.model.optimizer.get_weights()\n }", "def on_train_begin(self, logs=None):", "def on_train_begin(self, logs=None):", "def train(self):\n\n 
# Set the pretrain log\n trlog = {}\n trlog['args'] = vars(self.args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['train_iou']=[]\n trlog['val_iou']=[]\n trlog['max_iou'] = 0.0\n trlog['max_iou_epoch'] = 0\n\n # Set the timer\n timer = Timer()\n # Set global count to zero\n global_count = 0\n # Set tensorboardX\n writer = SummaryWriter(comment=self.args.save_path)\n\n # Start pretrain\n for epoch in range(1, self.args.pre_max_epoch + 1):\n # Update learning rate\n self.lr_scheduler.step()\n # Set the model to train mode\n self.model.train()\n self.model.mode = 'train'\n # Set averager classes to record training losses and accuracies\n train_loss_averager = Averager()\n train_acc_averager = Averager()\n train_iou_averager = Averager()\n\n # Using tqdm to read samples from train loader\n tqdm_gen = tqdm.tqdm(self.train_loader)\n\n for i, batch in enumerate(tqdm_gen, 1):\n # Update global count number \n global_count = global_count + 1\n if torch.cuda.is_available():\n data, label = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n label = batch[1]\n\n # Output logits for model\n logits = self.model(data)\n # Calculate train loss\n # CD loss is modified in the whole project to incorporate ony Cross Entropy loss. Modify as per requirement.\n #loss = self.FL(logits, label) + self.CD(logits,label) + self.LS(logits,label)\n loss = self.CD(logits,label) \n \n # Calculate train accuracy\n self._reset_metrics()\n seg_metrics = eval_metrics(logits, label, self.args.num_classes)\n self._update_seg_metrics(*seg_metrics)\n pixAcc, mIoU, _ = self._get_seg_metrics(self.args.num_classes).values()\n\n # Add loss and accuracy for the averagers\n train_loss_averager.add(loss.item())\n train_acc_averager.add(pixAcc)\n train_iou_averager.add(mIoU)\n\n # Print loss and accuracy till this step\n tqdm_gen.set_description('Epoch {}, Loss={:.4f} Acc={:.4f} IOU={:.4f}'.format(epoch, train_loss_averager.item(),train_acc_averager.item()*100.0,train_iou_averager.item()))\n \n # Loss backwards and optimizer updates\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the averagers\n train_loss_averager = train_loss_averager.item()\n train_acc_averager = train_acc_averager.item()\n train_iou_averager = train_iou_averager.item()\n\n writer.add_scalar('data/train_loss(Pre)', float(train_loss_averager), epoch)\n writer.add_scalar('data/train_acc(Pre)', float(train_acc_averager)*100.0, epoch) \n writer.add_scalar('data/train_iou (Pre)', float(train_iou_averager), epoch)\n \n print('Epoch {}, Train: Loss={:.4f}, Acc={:.4f}, IoU={:.4f}'.format(epoch, train_loss_averager, train_acc_averager*100.0,train_iou_averager)) \n \n # Start validation for this epoch, set model to eval mode\n self.model.eval()\n self.model.mode = 'val'\n\n # Set averager classes to record validation losses and accuracies\n val_loss_averager = Averager()\n val_acc_averager = Averager()\n val_iou_averager = Averager()\n\n # Print previous information \n if epoch % 1 == 0:\n print('Best Val Epoch {}, Best Val IoU={:.4f}'.format(trlog['max_iou_epoch'], trlog['max_iou']))\n\n # Run validation\n for i, batch in enumerate(self.val_loader, 1):\n if torch.cuda.is_available():\n data, labels,_ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n label=labels[0]\n p = self.args.way*self.args.shot\n data_shot, data_query = data[:p], data[p:]\n label_shot,label=labels[:p],labels[p:]\n \n par=data_shot, label_shot, data_query\n logits = self.model(par)\n # Calculate 
preval loss\n \n #loss = self.FL(logits, label) + self.CD(logits,label) + self.LS(logits,label)\n loss = self.CD(logits,label) \n \n # Calculate val accuracy\n self._reset_metrics()\n seg_metrics = eval_metrics(logits, label, self.args.way)\n self._update_seg_metrics(*seg_metrics)\n pixAcc, mIoU, _ = self._get_seg_metrics(self.args.way).values()\n\n val_loss_averager.add(loss.item())\n val_acc_averager.add(pixAcc)\n val_iou_averager.add(mIoU) \n\n # Update validation averagers\n val_loss_averager = val_loss_averager.item()\n val_acc_averager = val_acc_averager.item()\n val_iou_averager = val_iou_averager.item()\n \n writer.add_scalar('data/val_loss(Pre)', float(val_loss_averager), epoch)\n writer.add_scalar('data/val_acc(Pre)', float(val_acc_averager)*100.0, epoch) \n writer.add_scalar('data/val_iou (Pre)', float(val_iou_averager), epoch) \n \n # Print loss and accuracy for this epoch\n print('Epoch {}, Val: Loss={:.4f} Acc={:.4f} IoU={:.4f}'.format(epoch, val_loss_averager, val_acc_averager*100.0,val_iou_averager))\n\n # Update best saved model\n if val_iou_averager > trlog['max_iou']:\n trlog['max_iou'] = val_iou_averager\n trlog['max_iou_epoch'] = epoch\n print(\"model saved in max_iou\")\n self.save_model('max_iou')\n\n # Save model every 10 epochs\n if epoch % 10 == 0:\n self.save_model('epoch'+str(epoch))\n\n # Update the logs\n trlog['train_loss'].append(train_loss_averager)\n trlog['train_acc'].append(train_acc_averager)\n trlog['val_loss'].append(val_loss_averager)\n trlog['val_acc'].append(val_acc_averager)\n trlog['train_iou'].append(train_iou_averager)\n trlog['val_iou'].append(val_iou_averager)\n\n # Save log\n torch.save(trlog, osp.join(self.args.save_path, 'trlog'))\n\n if epoch % 1 == 0:\n print('Running Time: {}, Estimated Time: {}'.format(timer.measure(), timer.measure(epoch / self.args.max_epoch)))\n writer.close()", "def on_train_begin(self, logs={}):\n self.losses = []\n self.val_losses = []", "def on_train_end(self):", "def train_self(self):\n # for each numeric column, we need to record mean and std for both classes\n for col in self.num_cols:\n self.prob_hub[col] = {}\n for claz in self.class_list:\n mean, std = get_mean_std(self.data[self.data[self.class_column] == claz][col])\n self.prob_hub[col][claz] = (mean, std)\n\n # for each categorical columns, we need to record P(X=x|Y=y)\n for col in self.cat_cols:\n ulist = unique_list(self.data[col])\n self.prob_hub[col] = {}\n stat = self.data.groupby(self.class_column)[col].value_counts() / self.data.groupby(self.class_column)[col].count()\n # for each class\n for claz in self.class_list:\n self.prob_hub[col][claz] = {}\n for uni_element in ulist:\n self.prob_hub[col][claz][uni_element] = stat[claz][uni_element]\n\n self.predict(self.data, True)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def __savePreProcessedData(self):\n np.savetxt(self.X_filename, self.X, delimiter=',')\n np.savetxt(self.y_filename, self.le.fit_transform(self.y), delimiter=',')\n #Need to save the label Enconder to inverse transform later\n joblib.dump(self.le, self.le_filename)\n\n print(\"Saved X and y\")", "def _save_train(self, context):\n last_train = context.user_data['last_train']\n saved_trains: dict = self._saved_trains(context)\n if last_train not in saved_trains.values():\n train_label = Train.from_json(last_train).one_line_description()\n saved_trains[train_label] = last_train\n\n # free memory\n context.user_data['last_train'] = {}", "def 
store_info(self):\r\n _debug('Protocol: store_info' ) \r\n \r\n #Times\r\n if self.measure_type == '3PL':\r\n self.t_probe_p_s .append(self.t_probe) \r\n self.t_probe_m_s .append(self.t_probe) \r\n if self.measure_type == '4PL':\r\n self.t_probe_p_s .append(self.tp) \r\n self.t_probe_m_s .append(self.tm) \r\n \r\n self.t_pulseSequences_s.append(self.t_pulseSequences)\r\n self.t_process_s .append(self.t_process)\r\n #Total, accumulated, times\r\n self.t_tot_pulseSequences_s.append(self.t_tot_pulseSequences) \r\n self.t_tot_process_s .append(self.t_tot_process) \r\n #Rates\r\n self.Gp_guess_s .append(self.Gp_guess) #Mean of gamma+ \r\n self.Gm_guess_s .append(self.Gm_guess) #Mean of gamma- \r\n self.eGp_guess_s .append(self.eGp_guess) #Uncertainty of gamma+\r\n self.eGm_guess_s .append(self.eGm_guess) #Uncertainty of gamma- \r\n self.cov_GpGm_s .append(self.cov_GpGm) #Covariance of gamma- & gamma- \r\n #Other\r\n self.nb_iteration_s.append(self.iter)\r\n self.R_tot_s .append(self.R_tot)", "def process_sample_train(self):\n raise NotImplementedError", "def _load_training_data(self):\n self._save_training_data()", "def training_metrics(self):\r\n if self._training_metrics is None:\r\n # Builds the per-task metrics and losses.\r\n self._training_metrics = {}\r\n for name, task in self.multi_task.tasks.items():\r\n self._training_metrics[name] = task.build_metrics(training=True)\r\n return self._training_metrics", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation 
loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def train(self):\n freq_dist = self.getFreqDist()\n\n self.freq_dist = freq_dist", "def train(self):\n raise NotImplementedError", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def train(self):\n\t\traise NotImplementedError", "def __train_hebbs__(self):\n\n rho = self.__get_rho__()\n\n copied_train_data = np.copy(self.train_data)\n\n for curr_train_sample in tqdm(copied_train_data,\n disable=not self.verbose,\n postfix=f'Model training...'):\n\n train_sample_norm = curr_train_sample - rho\n\n assert len(train_sample_norm.shape) == 1, \\\n f'Flatten your input! Now dim is: {train_sample_norm.shape}'\n\n self.weights += np.outer(train_sample_norm, train_sample_norm)\n\n diagonal_values = np.diag(self.weights) # extracts diagonal values from matrix\n diagonal_weights = np.diag(diagonal_values) # creates diagonal matrix from diagonal values for weights\n\n self.weights = self.weights - diagonal_weights\n self.weights = self.weights / len(self.train_data)", "def save_weight_in_epoch(net):\n global weights\n input_layer_weight = net.train_layers[0].weight.copy()\n weights[:, net.epoch + 1:net.epoch + 2] = input_layer_weight", "def __init__(self):\n self.sum_of_node_inputs = 0\n self.output = 0\n self.delta = 0\n self.dp = 0\n self.onehot_label = 0", "def train(self):\n return", "def __init__(self, info):\n self.val = info[\"prediction\"] * info[\"learning_rate\"]\n self.train_size = info[\"ematrix\"].label.shape[0]\n self.avg_target = np.mean(info[\"ematrix\"].label, axis=0)[0]", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def nn_train(df_nn, write=False):\n #features = ['fnlwgt', 'education_num', 'capital_gain', 'capital_loss',\n # 'hours_per_week', 'workclass_num', 'over50K', 'marital_status_num',\n # 'race_num', 'gender_num', 'relationship_num']#, 'y_old']\n scale_num = StandardScaler()\n scale_num.fit(df.loc[:,df.columns != 'y_old'])\n df.loc[:,df.columns != 'y_old'] = scale_num.transform(df.loc[:,df.columns != 'y_old'])\n test_nn = df_nn[df_nn['test'] > 0.5].drop('test', 1)\n data_nn = df_nn[df_nn['test'] <= 0.5].drop('test', 1)\n y_test_nn = test_nn['y_old']\n X_test_nn = test_nn.drop('y_old', 1)\n y_data_nn = data_nn['y_old']\n X_data_nn = data_nn.drop('y_old', 1)\n X_train_nn, X_val_nn, y_train_nn, y_val_nn = train_test_split(\n X_data_nn, y_data_nn, test_size=0.2, random_state=321)\n model_nn = nn_model()\n model_nn.summary()\n hist = model_nn.fit(np.array(X_train_nn), np.array(y_train_nn),\n epochs=150, validation_split=0.25,\n shuffle=True, verbose=0)\n plt.plot(hist.history['acc'])\n plt.plot(hist.history['val_acc'])\n plt.title('model accuracy')\n 
plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'valid'], loc='upper right')\n plt.show()\n if write:\n model_nn.save(obj_save_path+'model_nn.p')\n # model_nn = load_model(obj_save_path+'model_nn.p')\n return model_nn, X_train_nn, y_train_nn, X_val_nn, y_val_nn, X_test_nn, y_test_nn", "def before_epoch(self):\n if self.trainer._mode == 'train':\n with open(os.path.join(self.root_path, 'metrics.txt'), 'a+') as fout:\n if hasattr(self.trainer, '_metrics'):\n fout.write(\n str(self.trainer._epoch - 1) + '\\t' +\n str(self.trainer._metrics) + '\\n')", "def __train_input_fn(self):\n ## To ensure unbiased training, grab random labels to define batch\n labels = np.random.choice(np.unique(self.labels_train), self.batch_size)\n ## Then grab a random spectrum from each label\n spectra = np.zeros((self.batch_size, len(self.spectra_train[0])))\n for i,l in enumerate(labels):\n good = self.labels_train == l\n idx = np.random.choice(np.sum(good))\n spectra[i] = self.spectra_train[good][idx]\n ## Recast into dictionary for estimator\n features = {'flux': spectra}\n ## Convert labels to integers\n ilabels = [self.label_index_lookup[l] for l in labels]\n return features, ilabels", "def init_metrics(self):\n\n self.metrics = {}\n\n self.metrics['train_loss'] = np.zeros(0)\n self.metrics['test_loss'] = np.zeros(0)\n\n # self.orth_clf = LinearDecoder(self, self.q_, MeanClassifier)\n # self.metrics['train_orthogonality'] = np.zeros(0)\n # self.metrics['test_orthogonality'] = np.zeros(0)\n\n self.metrics['train_parallelism'] = np.zeros((0,self.q_)) \n self.metrics['test_parallelism'] = np.zeros((0,self.q_))", "def train_weak_signals(data, weak_signal_data, num_weak_signal):\n\n train_data, train_labels = data['training_data']\n val_data, val_labels = data['validation_data']\n test_data, test_labels = data['test_data']\n\n n, d = train_data.shape\n\n weak_signal_train_data = weak_signal_data[0]\n weak_signal_val_data = weak_signal_data[1]\n weak_signal_test_data = weak_signal_data[2]\n\n weak_signals = []\n stats = np.zeros(num_weak_signal)\n w_sig_probabilities = []\n w_sig_test_accuracies = []\n weak_val_accuracy = []\n\n\n for i in range(num_weak_signal):\n # fit model\n model = LogisticRegression(solver = \"lbfgs\", max_iter= 1000)\n model.fit(weak_signal_train_data[i], train_labels)\n weak_signals.append(model)\n\n # evaluate probability of P(X=1)\n probability = model.predict_proba(weak_signal_val_data[i])[:, 1]\n score = val_labels * (1 - probability) + (1 - val_labels) * probability\n stats[i] = np.sum(score) / score.size\n w_sig_probabilities.append(probability)\n\n # evaluate accuracy for validation data\n weak_val_accuracy.append(accuracy_score(val_labels, np.round(probability)))\n\n # evaluate accuracy for test data\n test_predictions = model.predict(weak_signal_test_data[i])\n w_sig_test_accuracies.append(accuracy_score(test_labels, test_predictions))\n\n\n model = {}\n model['models'] = weak_signals\n model['probabilities'] = np.array(w_sig_probabilities)\n model['error_bounds'] = stats\n model['validation_accuracy'] = weak_val_accuracy\n model['test_accuracy'] = w_sig_test_accuracies\n\n return model", "def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # esto es un break point para que puedas comprobar el 
formato de los datos\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) #este sería x0 (en la primera vuelta) (xj)\n if (prod > max):\n max=prod #en max guardamos la distancia a la instancia que más cerca está de la que estamos recorriendo\n indclase=j #guardas el índice de la clase a la que predices que pertenece\n\n if(indclase != trainingLabels[i]):\n # recalcular pesos\n self.weights[trainingLabels[i]].__radd__(trainingData[i]) #honek jarraian egiten du gehiketa pisu guztientzat\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n self.weights[indclase].__sub__(trainingData[i]) #honek jarraian egiten du kenketa pisu guztientzat\n\n\n\n\n\n ########################################################################################\n # 1. i es el indice de un ejemplo (un item, f(x) de un ejemplo) del conjunto de entrenamiento.\n # 2. Asi pues, en cada vuelta de este loop se trata un solo ejemplo\n # por cada ejemplo calculareis el producto punto (dotProduct) w*item\n # NOTAS: Recordad que cada ejemplo viene representado por varios rasgos (o features), es decir, es un vector de rasgos, tantos como nos marca el atributo self.features.\n # Asi cada ejemplo es de dimension 1 filas y self.features).\n # La dimension del vector w tambien es self.features, es decir, habra tantos pesos en w_rasgo dentro de w como rasgos haya en cada item de ejemplo\n # Recordad tambien que es una clasificacion multiclase en este caso. Hay tantas clases como nos marca el atributo self.legalLabels\n #########################################################################################", "def info(self):\n return {\n \"learning_rate\": self.learning_rate,\n \"learning_rate_decay\": self.learning_rate_decay,\n \"training_epochs\": self.training_epochs,\n \"batch_size\": self.batch_size,\n \"training_history\": self.training_history,\n \"iteration\": self.iteration,\n \"features\": self.featureset.as_dict()\n }", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. 
for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def _train_epoch(self, epoch):\n self.model.train()\n\n\n total_loss = 0\n total_metrics = np.zeros(len(self.metrics))\n length = len(self.data_loader)\n total_pm25_loss = 0\n total_pm10_loss = 0\n pm25_loss = 0\n pm10_loss = 0\n\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n # print(output.shape, target.shape)\n target = target.squeeze()\n model_loss = self.loss(output, target)\n\n # print(output.shape, target.shape)\n # pm25_predict, pm10_predict = torch.chunk(output, 2, dim=1)\n # pm25_target, pm10_target = torch.chunk(target, 2, dim=1)\n # pm25_loss = self.loss(pm25_predict, pm25_target)\n # pm10_loss = self.loss(pm10_predict, pm10_target)\n # total_pm25_loss += pm25_loss.item()\n # total_pm10_loss += pm10_loss.item()\n\n l2_reg = torch.tensor(0.0).to(self.device)\n if self.config['trainer']['l2_regularization']:\n for param in self.model.parameters():\n l2_reg += torch.norm(param, p=2)\n loss = model_loss + self.config['trainer']['l2_lambda'] * l2_reg\n else:\n loss = model_loss\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), 1)\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.writer.add_scalar('loss', loss.item())\n total_loss += loss.item()\n # total_metrics += self._eval_metrics(output, target)\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f} model_loss: {:.6f} pm25_loss {:.6f} pm10_loss {:.6f} l2_loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item(),\n model_loss.item(),\n pm25_loss,\n pm10_loss,\n l2_reg.item()))\n\n if batch_idx == self.len_epoch:\n break\n\n log = {\n 'loss': total_loss / length,\n 'rmse_pm25_loss': np.sqrt(total_pm25_loss / length),\n 'rmse_pm10_loss': np.sqrt(total_pm10_loss / length)\n }\n\n\n val_log = self._valid_epoch(epoch)\n log.update(val_log)\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step(val_log['val_loss'])\n\n return log", "def train_data(self):\n return self._train_data", "def on_train_begin(self, logs={}):\n self.val_kappas = []", "def train(self, ):\n raise NotImplementedError", "def _train_epoch(self, epoch: int) -> Dict[str, float]:\n logger.info(\"Epoch %d/%d\", epoch, self._num_epochs - 1)\n peak_cpu_usage = peak_memory_mb()\n logger.info(f\"Peak CPU memory usage MB: {peak_cpu_usage}\")\n gpu_usage = []\n for gpu, memory in gpu_memory_mb().items():\n gpu_usage.append((gpu, memory))\n logger.info(f\"GPU {gpu} memory usage MB: {memory}\")\n\n train_loss = 0.0\n # Set the model to \"train\" mode.\n self.model.train()\n\n num_gpus = len(self._cuda_devices)\n\n # Get tqdm for the training batches\n raw_train_generator = self.iterator(self.train_data,\n num_epochs=1,\n shuffle=self.shuffle)\n train_generator = lazy_groups_of(raw_train_generator, num_gpus)\n num_training_batches = math.ceil(self.iterator.get_num_batches(self.train_data)/num_gpus)\n self._last_log = time.time()\n last_save_time = time.time()\n\n 
batches_this_epoch = 0\n if self._batch_num_total is None:\n self._batch_num_total = 0\n\n histogram_parameters = set(self.model.get_parameters_for_histogram_tensorboard_logging())\n\n logger.info(\"Training\")\n train_generator_tqdm = Tqdm.tqdm(train_generator,\n total=num_training_batches)\n cumulative_batch_size = 0\n for batch_group in train_generator_tqdm:\n batches_this_epoch += 1\n self._batch_num_total += 1\n batch_num_total = self._batch_num_total\n\n self.optimizer.zero_grad()\n\n output_dict = self.get_output_dict(batch_group, for_training=True)\n loss = self.get_batch_loss(output_dict, for_training=True)\n\n if torch.isnan(loss):\n raise ValueError(\"nan loss encountered\")\n\n loss.backward()\n\n train_loss += loss.item()\n\n batch_grad_norm = self.rescale_gradients()\n\n # This does nothing if batch_num_total is None or you are using a\n # scheduler which doesn't update per batch.\n if self._learning_rate_scheduler:\n self._learning_rate_scheduler.step_batch(batch_num_total)\n if self._momentum_scheduler:\n self._momentum_scheduler.step_batch(batch_num_total)\n\n if self._tensorboard.should_log_histograms_this_batch():\n # get the magnitude of parameter updates for logging\n # We need a copy of current parameters to compute magnitude of updates,\n # and copy them to CPU so large models won't go OOM on the GPU.\n param_updates = {name: param.detach().cpu().clone()\n for name, param in self.model.named_parameters()}\n self.optimizer.step()\n for name, param in self.model.named_parameters():\n param_updates[name].sub_(param.detach().cpu())\n update_norm = torch.norm(param_updates[name].view(-1, ))\n param_norm = torch.norm(param.view(-1, )).cpu()\n self._tensorboard.add_train_scalar(\"gradient_update/\" + name,\n update_norm / (param_norm + 1e-7))\n else:\n self.optimizer.step()\n\n # Update moving averages\n if self._moving_average is not None:\n self._moving_average.apply(batch_num_total)\n\n # Update the description with the latest metrics\n metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch)\n description = training_util.description_from_metrics(metrics)\n\n train_generator_tqdm.set_description(description, refresh=False)\n\n # Log parameter values to Tensorboard\n if self._tensorboard.should_log_this_batch():\n self._tensorboard.log_parameter_and_gradient_statistics(self.model, batch_grad_norm)\n self._tensorboard.log_learning_rates(self.model, self.optimizer)\n\n self._tensorboard.add_train_scalar(\"loss/loss_train\", metrics[\"loss\"])\n self._tensorboard.log_metrics({\"epoch_metrics/\" + k: v for k, v in metrics.items()})\n\n if self.tensorboard_log_batch_callback:\n self.tensorboard_log_batch_callback(output_dict, self._tensorboard)\n\n if self._tensorboard.should_log_histograms_this_batch():\n self._tensorboard.log_histograms(self.model, histogram_parameters)\n\n if self._log_batch_size_period:\n cur_batch = sum([training_util.get_batch_size(batch) for batch in batch_group])\n cumulative_batch_size += cur_batch\n if (batches_this_epoch - 1) % self._log_batch_size_period == 0:\n average = cumulative_batch_size/batches_this_epoch\n logger.info(f\"current batch size: {cur_batch} mean batch size: {average}\")\n self._tensorboard.add_train_scalar(\"current_batch_size\", cur_batch)\n self._tensorboard.add_train_scalar(\"mean_batch_size\", average)\n\n # Save model if needed.\n if self._model_save_interval is not None and (\n time.time() - last_save_time > self._model_save_interval\n ):\n last_save_time = time.time()\n self._save_checkpoint(\n 
'{0}.{1}'.format(epoch, training_util.time_to_str(int(last_save_time)))\n )\n metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch, reset=True)\n metrics['cpu_memory_MB'] = peak_cpu_usage\n for (gpu_num, memory) in gpu_usage:\n metrics['gpu_'+str(gpu_num)+'_memory_MB'] = memory\n return metrics", "def NN(train_df, val_df, test_df, sub_path):\n logging.info('Neural Network preprocessing')\n \n if train_df is not None: \n y_train = train_df['is_attributed'].values\n train_df = train_df.drop('is_attributed', axis = 1)\n train_df = train_df.drop('attributed_time', axis = 1) \n #train_df = train_df.drop('click_time', axis = 1) #only if no preprocessing\n gc.collect()\n if val_df is not None:\n y_val = val_df['is_attributed'].values \n val_df = val_df.drop(['is_attributed'], axis = 1)\n val_df = get_keras_data(val_df)\n \n list_variables = get_values(train_df)\n print(list_variables)\n \n logging.info('Model is creating...') \n \n max_var = []\n if test_df is not None:\n for i, var in enumerate(list_variables):\n max_var.append(np.max([train_df[var].max(), test_df[var].max()])+1) \n train_df = get_keras_data(train_df)\n else:\n for i, var in enumerate(list_variables):\n max_var.append(train_df[var].max()+1) \n train_df = get_keras_data(train_df)\n \n emb_n = 50\n dense_n = 1000\n \n in_var = []\n emb_var = [] \n for i, var in enumerate(list_variables):\n in_var.append(Input(shape=[1], name = var))\n emb_var.append(Embedding(max_var[i], emb_n)(in_var[i]))\n \n fe = concatenate([emb for emb in emb_var])\n s_dout = SpatialDropout1D(0.2)(fe)\n fl1 = Flatten()(s_dout)\n #conv = Conv1D(100, kernel_size=4, strides=1, padding='same')(s_dout)\n dl = Dense(100)(s_dout)\n fl2 = Flatten()(dl)\n concat = concatenate([(fl1), (fl2)])\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(concat))\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(x))\n outp = Dense(1,activation='sigmoid')(x)\n \n model = Model(inputs=[var for var in in_var], outputs=outp)\n \n logging.info('Model is compiling...')\n \n batch_size = 50000\n epochs = 2 #12 for sample_train\n exp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1\n steps = int(len(list(train_df)[0]) / batch_size) * epochs\n lr_init, lr_fin = 0.002, 0.0002\n lr_decay = exp_decay(lr_init, lr_fin, steps)\n optimizer_adam = Adam(lr=lr_init, decay=lr_decay)\n \n model.compile(loss='binary_crossentropy',optimizer=optimizer_adam,metrics=['accuracy'])\n model.summary()\n \n logging.info('Model is training...')\n \n model.fit(train_df, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2, validation_split=0.1)\n del train_df, y_train; gc.collect()\n \n if val_df is not None:\n logging.info('Prediction on validation set')\n predictions_NN_prob = model.predict(val_df, batch_size=batch_size, verbose=2)\n del val_df; gc.collect()\n predictions_NN_prob = predictions_NN_prob[:,0]\n \n predictions_NN = np.where(predictions_NN_prob > 0.5, 1, 0)\n acc_NN = accuracy_score(y_val, predictions_NN)\n print('Overall accuracy of Neural Network model:', acc_NN)\n \n if test_df is not None:\n logging.info('Prediction on test set')\n sub = pd.DataFrame()\n sub['click_id'] = test_df['click_id'].astype('int')\n test_df = test_df.drop(['click_id'], axis=1)\n test_df = get_keras_data(test_df)\n \n sub['is_attributed'] = model.predict(test_df, batch_size=batch_size, verbose=2)\n del test_df; gc.collect()\n logging.info(\"Writing....\")\n with file_io.FileIO(sub_path, mode='wb') as fout:\n sub.to_csv(fout,index=False)\n 
logging.info(\"Done...\")\n logging.info(sub.info())", "def trainData(self, X, y, NeuralNet, epochs):", "def train():\n pass", "def __init__(self, cell_index, stimulus_type, loss, optimizer, mean_adapt):\n\n # compile the model\n with notify('Compiling'):\n self.model.compile(loss=loss, optimizer=optimizer)\n\n # save architecture as a json file\n self.savedir = mksavedir(prefix=str(self))\n with notify('Saving architecture'):\n with open(join(self.savedir, 'architecture.json'), 'w') as f:\n f.write(self.model.to_json())\n\n # function to write data to a CSV file\n self.save_csv = partial(tocsv, join(self.savedir, 'performance'))\n self.save_csv(['Epoch', 'Iteration', 'Training CC', 'Test CC'])\n # load experimental data\n self.stimulus_type = stimulus_type\n if str(self) == 'lstm':\n numTime = self.stim_shape[0]\n self.holdout = loadexpt(cell_index, self.stimulus_type, 'test', self.stim_shape[1], mean_adapt=mean_adapt)\n self.training = loadexpt(cell_index, self.stimulus_type, 'train', self.stim_shape[1], mean_adapt=mean_adapt)\n X_train = self.training.X\n y_train = self.training.y\n X_test = self.holdout.X\n y_test = self.holdout.y\n numTrain = (int(X_train.shape[0]/numTime))*numTime\n numTest = (int(X_test.shape[0]/numTime))*numTime\n X_train = X_train[:numTrain]\n y_train = y_train[:numTrain]\n X_test = X_test[:numTest]\n y_test = y_test[:numTest]\n X_train = np.reshape(X_train, (int(numTrain/numTime), numTime, self.stim_shape[1], self.stim_shape[2], self.stim_shape[3]))\n y_train = np.reshape(y_train, (int(numTrain/numTime), numTime, 1))\n X_test = np.reshape(X_test, (int(numTest/numTime), numTime, self.stim_shape[1], self.stim_shape[2], self.stim_shape[3]))\n y_test = np.reshape(y_test, (int(numTest/numTime), numTime, 1))\n\t self.training = Batch(X_train, y_train)\n\t self.holdout = Batch(X_test, y_test)\n else:\n self.holdout = loadexpt(cell_index, self.stimulus_type, 'test', self.stim_shape[0], mean_adapt=mean_adapt)\n self.training = loadexpt(cell_index, self.stimulus_type, 'train', self.stim_shape[0], mean_adapt=mean_adapt)\n # save model information to a markdown file\n if 'architecture' not in self.__dict__:\n self.architecture = 'No architecture information specified'\n\n metadata = ['# ' + str(self), '## ' + strftime('%B %d, %Y'),\n 'Started training on: ' + strftime('%I:%M:%S %p'),\n '### Architecture', self.architecture,\n '### Stimulus', 'Experiment 10-07-15', stimulus_type, 'Mean adaptation: ' + str(mean_adapt),\n 'Cell #{}'.format(cell_index),\n '### Optimization', str(loss), str(optimizer)]\n tomarkdown(join(self.savedir, 'README'), metadata)", "def training_data(self):\n if self._training_data is None:\n self._load_training_data()\n if self._swapped_training_data is None:\n self._swapped_training_data = {}\n for key, value in self._training_data.items():\n self._swapped_training_data[key] = value\n return self._swapped_training_data", "def save_trainable_variables (self , sess , savefn):\r\n state = getattr (self , 'state' , {})\r\n utils.train.save_trainable_variables(\r\n sess, savefn, self._scope, **state )", "def on_train_begin(self, logs={}):\n self._beta = []", "def train(train_features, train_labels, val_features, val_labels, network, optimizer, loss, config, log_date, log_timestamp):\n\n # prints the number of learnable parameters in the network\n count_parameters(network)\n\n # init network using weight initialization of choice\n network = init_weights(network)\n # send network to GPU\n network.to(config['gpu'])\n network.train()\n\n # if weighted loss 
chosen, calculate weights based on training dataset; else each class is weighted equally\n if config['use_weights']:\n class_weights = class_weight.compute_class_weight('balanced', classes=np.unique(train_labels + 1), y=train_labels + 1)\n if config['loss'] == 'cross_entropy':\n loss.weights = class_weights\n print('Applied weighted class weights: ')\n print(class_weights)\n else:\n class_weights = class_weight.compute_class_weight(None, classes=np.unique(train_labels + 1), y=train_labels + 1)\n if config['loss'] == 'cross_entropy':\n loss.weights = class_weights\n\n\n # initialize optimizer and loss\n opt, criterion = optimizer, loss\n\n if config['loss'] == 'maxup':\n maxup = Maxup(myNoiseAdditionAugmenter, ntrials=4)\n\n # initialize training and validation dataset, define DataLoaders\n dataset = torch.utils.data.TensorDataset(torch.from_numpy(train_features), torch.from_numpy(train_labels))\n trainloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=True)\n dataset = torch.utils.data.TensorDataset(torch.from_numpy(val_features).float(), torch.from_numpy(val_labels))\n valloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=False)\n\n # counters and objects used for early stopping and learning rate adjustment\n best_loss = np.inf\n best_network = None\n best_val_losses = None\n best_train_losses = None\n best_val_preds = None\n best_train_preds = None\n early_stop = False\n lr_pt_counter = 0\n es_pt_counter = 0\n\n # training loop; iterates through epochs\n for e in range(config['epochs']):\n \"\"\"\n TRAINING\n \"\"\"\n # helper objects\n train_preds = []\n train_gt = []\n train_losses = []\n start_time = time.time()\n batch_num = 1\n\n # iterate over train dataset\n for i, (x, y) in enumerate(trainloader):\n # send x and y to GPU\n inputs, targets = x.to(config['gpu']), y.to(config['gpu'])\n # zero accumulated gradients\n opt.zero_grad()\n\n if config['loss'] == 'maxup':\n # Increase the inputs via data augmentation\n inputs, targets = maxup(inputs, targets)\n\n # send inputs through network to get predictions, calculate loss and backpropagate\n train_output = network(inputs)\n\n if config['loss'] == 'maxup':\n # calculates loss\n train_loss = maxup.maxup_loss(train_output, targets.long())[0]\n else:\n train_loss = criterion(train_output, targets.long())\n\n train_loss.backward()\n opt.step()\n # append train loss to list\n train_losses.append(train_loss.item())\n\n # create predictions and append them to final list\n y_preds = np.argmax(train_output.cpu().detach().numpy(), axis=-1)\n y_true = targets.cpu().numpy().flatten()\n train_preds = np.concatenate((np.array(train_preds, int), np.array(y_preds, int)))\n train_gt = np.concatenate((np.array(train_gt, int), np.array(y_true, int)))\n\n # if verbose print out batch wise results (batch number, loss and time)\n if config['verbose']:\n if batch_num % config['print_freq'] == 0 and batch_num > 0:\n cur_loss = np.mean(train_losses)\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d} batches | ms/batch {:5.2f} | '\n 'train loss {:5.2f}'.format(e, batch_num, elapsed * 1000 / config['batch_size'], cur_loss))\n start_time = time.time()\n batch_num += 1\n\n # plot gradient flow if wanted\n if config['save_gradient_plot']:\n plot_grad_flow(network)\n\n \"\"\"\n VALIDATION\n \"\"\"\n\n # helper objects\n val_preds = []\n val_gt = []\n val_losses = []\n\n # set network to eval mode\n network.eval()\n with torch.no_grad():\n # iterate over validation dataset\n for i, (x, y) in 
enumerate(valloader):\n # send x and y to GPU\n inputs, targets = x.to(config['gpu']), y.to(config['gpu'])\n\n if config['loss'] == 'maxup':\n # Increase the inputs via data augmentation\n inputs, targets = maxup(inputs, targets)\n\n # send inputs through network to get predictions, loss and calculate softmax probabilities\n val_output = network(inputs)\n if config['loss'] == 'maxup':\n # calculates loss\n val_loss = maxup.maxup_loss(val_output, targets.long())[0]\n else:\n val_loss = criterion(val_output, targets.long())\n\n val_output = torch.nn.functional.softmax(val_output, dim=1)\n\n # append validation loss to list\n val_losses.append(val_loss.item())\n\n # create predictions and append them to final list\n y_preds = np.argmax(val_output.cpu().numpy(), axis=-1)\n y_true = targets.cpu().numpy().flatten()\n val_preds = np.concatenate((np.array(val_preds, int), np.array(y_preds, int)))\n val_gt = np.concatenate((np.array(val_gt, int), np.array(y_true, int)))\n\n # print epoch evaluation results for train and validation dataset\n print(\"EPOCH: {}/{}\".format(e + 1, config['epochs']),\n \"Train Loss: {:.4f}\".format(np.mean(train_losses)),\n \"Train Acc: {:.4f}\".format(jaccard_score(train_gt, train_preds, average='macro')),\n \"Train Prec: {:.4f}\".format(precision_score(train_gt, train_preds, average='macro')),\n \"Train Rcll: {:.4f}\".format(recall_score(train_gt, train_preds, average='macro')),\n \"Train F1: {:.4f}\".format(f1_score(train_gt, train_preds, average='macro')),\n \"Val Loss: {:.4f}\".format(np.mean(val_losses)),\n \"Val Acc: {:.4f}\".format(jaccard_score(val_gt, val_preds, average='macro')),\n \"Val Prec: {:.4f}\".format(precision_score(val_gt, val_preds, average='macro')),\n \"Val Rcll: {:.4f}\".format(recall_score(val_gt, val_preds, average='macro')),\n \"Val F1: {:.4f}\".format(f1_score(val_gt, val_preds, average='macro')))\n\n # if chosen, print the value counts of the predicted labels for train and validation dataset\n if config['print_counts']:\n y_train = np.bincount(train_preds)\n ii_train = np.nonzero(y_train)[0]\n y_val = np.bincount(val_preds)\n ii_val = np.nonzero(y_val)[0]\n print('Predicted Train Labels: ')\n print(np.vstack((ii_train, y_train[ii_train])).T)\n print('Predicted Val Labels: ')\n print(np.vstack((ii_val, y_val[ii_val])).T)\n\n # if adjust learning rate is enabled\n if config['adj_lr'] or config['early_stopping']:\n if best_loss < np.mean(val_losses):\n lr_pt_counter += 1\n es_pt_counter += 1\n\n # adjust learning rate check\n if lr_pt_counter >= config['adj_lr_patience'] and config['adj_lr']:\n config['lr'] *= 0.1\n for param_group in opt.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n print('Changing learning rate to {} since no loss improvement over {} epochs.'\n .format(config['lr'], str(lr_pt_counter)))\n\n # early stopping check\n if es_pt_counter >= config['es_patience'] and config['early_stopping']:\n print('Stopping training early since no loss improvement over {} epochs.'\n .format(str(es_pt_counter)))\n early_stop = True\n # print results of best epoch\n print('Final (best) results: ')\n print(\"Train Loss: {:.4f}\".format(np.mean(best_train_losses)),\n \"Train Acc: {:.4f}\".format(jaccard_score(train_gt, best_train_preds, average='macro')),\n \"Train Prec: {:.4f}\".format(precision_score(train_gt, best_train_preds, average='macro')),\n \"Train Rcll: {:.4f}\".format(recall_score(train_gt, best_train_preds, average='macro')),\n \"Train F1: {:.4f}\".format(f1_score(train_gt, best_train_preds, average='macro')),\n \"Val 
Loss: {:.4f}\".format(np.mean(best_val_losses)),\n \"Val Acc: {:.4f}\".format(jaccard_score(val_gt, best_val_preds, average='macro')),\n \"Val Prec: {:.4f}\".format(precision_score(val_gt, best_val_preds, average='macro')),\n \"Val Rcll: {:.4f}\".format(recall_score(val_gt, best_val_preds, average='macro')),\n \"Val F1: {:.4f}\".format(f1_score(val_gt, best_val_preds, average='macro')))\n\n else:\n lr_pt_counter = 0\n es_pt_counter = 0\n best_network = network\n best_loss = np.mean(val_losses)\n best_train_losses = train_losses\n best_train_preds = train_preds\n best_val_losses = val_losses\n best_val_preds = val_preds\n else:\n best_network = network\n best_train_losses = train_losses\n best_train_preds = train_preds\n best_val_losses = val_losses\n best_val_preds = val_preds\n\n # set network to train mode again\n network.train()\n\n if early_stop:\n break\n\n # if plot_gradient gradient plot is shown at end of training\n if config['save_gradient_plot']:\n mkdir_if_missing(os.path.join('logs', log_date, log_timestamp))\n plt.savefig(os.path.join('logs', log_date, log_timestamp, 'grad_flow.png'))\n\n # return validation, train and test predictions as numpy array with ground truth\n return best_network, np.vstack((best_val_preds, val_gt)).T, np.vstack((best_train_preds, train_gt)).T", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def train(self, verbose=True):\n\n\n learned = False\n iteration = 0\n\n from util.loss_functions import DifferentError\n loss = DifferentError()\n\n\n\n\n\n # Train for some epochs if the error is not 0\n while not learned:\n # x ist ein Bild bestehend aus einem Label (erster Eintrag) und 784 Pixeln\n # t ist das Zielergebnis von x (überprüfbar mit dem Label)\n # o ist der tatsächliche Ergebnis von x\n # w ist der Gewichtsvektor\n # Als Aktivierungsfunktion verwenden wir die Sigmoid Funktion\n # Das Training wird dann beendet, sobald das Fehlerkriterium konvergiert\n\n totalError = 0\n\n output = []\n labels = self.trainingSet.label\n inputs = self.trainingSet.input\n\n # iteriere für jede Instanz im Trainingsset x € X\n for input in inputs:\n # Ermittle O_x = sig(w*x)\n output.append(self.fire(input))\n\n # Ermittle Fehler AE = tx - ox\n error = loss.calculateError(np.array(labels), np.array(output))\n\n # grad = [0]\n grad = np.zeros(len(self.trainingSet.input[0]))\n grad2 = np.zeros(len(self.trainingSet.input[0]))\n\n for e, input, out in zip(error, inputs, output):\n activationPrime = Activation.getDerivative(activationName)(np.dot(np.array(input), self.weight))\n #grad += np.multiply( np.multiply( input, e), activationPrime)\n grad += np.multiply( input, e)\n\n # Update grad = grad + errorPrime * x * activationPrime\n\n\n\n # print grad - grad2\n #print \"Error: \" + str(error) + \" Grad: \" + str(grad)\n\n # update w: w <- w + n*grad\n self.updateWeights(grad)\n\n\n iteration += 1\n totalError = error.sum()\n\n if verbose:\n logging.info(\"Epoch: %i; Error: %i\", iteration, totalError)\n\n if abs(totalError) < 0.01 or iteration >= self.epochs:\n # stop criteria is reached\n learned = True\n\n pass", "def _train_internal(self, opts):\n\n batches_num = self._data.num_points / opts['batch_size']\n train_size = 
self._data.num_points\n num_plot = 320\n sample_prev = np.zeros([num_plot] + list(self._data.data_shape))\n l2s = []\n\n counter = 0\n decay = 1.\n logging.error('Training VAE')\n for _epoch in xrange(opts[\"gan_epoch_num\"]):\n\n if opts['decay_schedule'] == \"manual\":\n if _epoch == 30:\n decay = decay / 2.\n if _epoch == 50:\n decay = decay / 5.\n if _epoch == 100:\n decay = decay / 10.\n\n if _epoch > 0 and _epoch % opts['save_every_epoch'] == 0:\n os.path.join(opts['work_dir'], opts['ckpt_dir'])\n self._saver.save(self._session,\n os.path.join(opts['work_dir'],\n opts['ckpt_dir'],\n 'trained-pot'),\n global_step=counter)\n\n for _idx in xrange(batches_num):\n # logging.error('Step %d of %d' % (_idx, batches_num ) )\n data_ids = np.random.choice(train_size, opts['batch_size'],\n replace=False, p=self._data_weights)\n batch_images = self._data.data[data_ids].astype(np.float)\n batch_noise = utils.generate_noise(opts, opts['batch_size'])\n _, loss, loss_kl, loss_reconstruct = self._session.run(\n [self._optim, self._loss, self._loss_kl,\n self._loss_reconstruct],\n feed_dict={self._real_points_ph: batch_images,\n self._noise_ph: batch_noise,\n self._lr_decay_ph: decay,\n self._is_training_ph: True})\n counter += 1\n\n if opts['verbose'] and counter % opts['plot_every'] == 0:\n debug_str = 'Epoch: %d/%d, batch:%d/%d' % (\n _epoch+1, opts['gan_epoch_num'], _idx+1, batches_num)\n debug_str += ' [L=%.2g, Recon=%.2g, KLQ=%.2g]' % (\n loss, loss_reconstruct, loss_kl)\n logging.error(debug_str)\n\n if opts['verbose'] and counter % opts['plot_every'] == 0:\n metrics = Metrics()\n points_to_plot = self._run_batch(\n opts, self._generated, self._noise_ph,\n self._noise_for_plots[0:num_plot],\n self._is_training_ph, False)\n l2s.append(np.sum((points_to_plot - sample_prev)**2))\n metrics.l2s = l2s[:]\n metrics.make_plots(\n opts,\n counter,\n None,\n points_to_plot,\n prefix='sample_e%04d_mb%05d_' % (_epoch, _idx))\n reconstructed = self._session.run(\n self._reconstruct_x,\n feed_dict={self._real_points_ph: batch_images,\n self._is_training_ph: False})\n metrics.l2s = None\n metrics.make_plots(\n opts,\n counter,\n None,\n reconstructed,\n prefix='reconstr_e%04d_mb%05d_' % (_epoch, _idx))\n if opts['early_stop'] > 0 and counter > opts['early_stop']:\n break\n if _epoch > 0:\n os.path.join(opts['work_dir'], opts['ckpt_dir'])\n self._saver.save(self._session,\n os.path.join(opts['work_dir'],\n opts['ckpt_dir'],\n 'trained-pot-final'),\n global_step=counter)", "def on_train_end(self, logs={}):\n LOSSES.append(self.losses)\n print(self.j)\n (x_test, y_test) = get_test_data(self.j)\n y_pred = model.predict(x_test)\n y_pred = y_pred.squeeze()\n y_pred[y_pred < 0.5] = 0\n y_pred[y_pred >= 0.5] = 1\n print(y_pred)\n\n y_test = y_test.squeeze()\n print(y_test)\n confmat = confusion_matrix(y_test,y_pred)\n print(confmat)\n calc_TSS(confmat,2)", "def __init__(self,\n exp_name,\n ds_train,\n ds_val,\n epochs=210,\n batch_size=16,\n num_workers=4,\n loss='JointsMSELoss',\n lr=0.001,\n lr_decay=True,\n lr_decay_steps=(170, 200),\n lr_decay_gamma=0.1,\n optimizer='Adam',\n weight_decay=0.,\n momentum=0.9,\n nesterov=False,\n pretrained_weight_path=None,\n checkpoint_path=None,\n log_path='./logs',\n use_tensorboard=True,\n model_c=48,\n model_nof_joints=18,\n model_bn_momentum=0.1,\n flip_test_images=True,\n device=None\n ):\n super(Train, self).__init__()\n\n self.exp_name = exp_name\n self.ds_train = ds_train\n self.ds_val = ds_val\n self.epochs = epochs\n self.batch_size = batch_size\n self.num_workers = 
num_workers\n self.loss = loss\n self.lr = lr\n self.lr_decay = lr_decay\n self.lr_decay_steps = lr_decay_steps\n self.lr_decay_gamma = lr_decay_gamma\n self.optimizer = optimizer\n self.weight_decay = weight_decay\n self.momentum = momentum\n self.nesterov = nesterov\n self.pretrained_weight_path = pretrained_weight_path\n self.checkpoint_path = checkpoint_path\n self.log_path = os.path.join(log_path, self.exp_name)\n self.use_tensorboard = use_tensorboard\n self.model_c = model_c\n self.model_nof_joints = model_nof_joints\n self.model_bn_momentum = model_bn_momentum\n self.flip_test_images = flip_test_images\n self.epoch = 0\n\n\n os.makedirs(self.log_path, 0o755, exist_ok=True) # exist_ok=False to avoid overwriting\n if self.use_tensorboard:\n self.summary_writer = tb.SummaryWriter(self.log_path)\n\n #\n # write all experiment parameters in parameters.txt and in tensorboard text field\n self.parameters = [x + ': ' + str(y) + '\\n' for x, y in locals().items()]\n\n with open(os.path.join(self.log_path, 'parameters.txt'), 'w') as fd:\n fd.writelines(self.parameters)\n if self.use_tensorboard:\n self.summary_writer.add_text('parameters', '\\n'.join(self.parameters))\n\n #\n # load model\n self.model = HRNet(c=self.model_c, nof_joints=self.model_nof_joints,\n bn_momentum=self.model_bn_momentum).cuda()\n\n\n #\n # define loss and optimizers\n if self.loss == 'JointsMSELoss':\n self.loss_fn = JointsMSELoss()\n elif self.loss == 'JointsOHKMMSELoss':\n self.loss_fn = JointsOHKMMSELoss()\n else:\n raise NotImplementedError\n\n if optimizer == 'SGD':\n self.optim = SGD(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay,\n momentum=self.momentum, nesterov=self.nesterov)\n elif optimizer == 'Adam':\n self.optim = Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)\n else:\n raise NotImplementedError\n\n\n # load pre-trained weights (such as those pre-trained on imagenet)\n if self.pretrained_weight_path is not None:\n if self.model_nof_joints == 18:\n pretrained_dict = torch.load(self.pretrained_weight_path)\n pretrained_dict_items = list(pretrained_dict.items())\n pretrained_model = {}\n j = 0\n for k, v in self.model.state_dict().items():\n v = pretrained_dict_items[j][1]\n k = pretrained_dict_items[j][0]\n\n if k == 'final_layer.weight':\n x = torch.rand(1,48,1,1).cuda()\n v = torch.cat([v, x], dim=0)\n if k == 'final_layer.bias':\n x = torch.rand(1).cuda()\n v = torch.cat([v,x],dim=0)\n pretrained_model[k] = v\n j +=1\n model_dict=self.model.state_dict()\n model_dict.update(pretrained_model)\n self.model.load_state_dict(model_dict,strict=True)\n else:\n self.model.load_state_dict(torch.load(self.pretrained_weight_path, strict=True))\n print('Pre-trained weights loaded.')\n\n self.model = nn.DataParallel(self.model.cuda())\n # self.model = nn.DataParallel(self.model.to(self.device))\n #\n # load previous checkpoint\n if self.checkpoint_path is not None:\n print('Loading checkpoint %s...' 
% self.checkpoint_path)\n if os.path.isdir(self.checkpoint_path):\n path = os.path.join(self.checkpoint_path, 'checkpoint_last.pth')\n else:\n path = self.checkpoint_path\n self.starting_epoch, self.model, self.optim, self.params = load_checkpoint(path, self.model, self.optim,\n self.device)\n else:\n self.starting_epoch = 0\n\n if lr_decay:\n self.lr_scheduler = MultiStepLR(self.optim, list(self.lr_decay_steps), gamma=self.lr_decay_gamma,\n last_epoch=self.starting_epoch if self.starting_epoch else -1)\n\n #\n # load train and val datasets\n self.dl_train = DataLoader(self.ds_train, batch_size=self.batch_size, shuffle=True,\n num_workers=self.num_workers, drop_last=True)\n self.len_dl_train = len(self.dl_train)\n\n # dl_val = DataLoader(self.ds_val, batch_size=1, shuffle=False, num_workers=num_workers)\n self.dl_val = DataLoader(self.ds_val, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)\n self.len_dl_val = len(self.dl_val)\n\n #\n # initialize variables\n self.mean_loss_train = 0.\n self.mean_acc_train = 0.\n self.mean_loss_val = 0.\n self.mean_acc_val = 0.\n self.mean_mAP_val = 0.\n\n self.best_loss = None\n self.best_acc = None\n self.best_mAP = None", "def train(self):\n if self.retrain:\n self.states = self.get_states()\n self.transitions = self.get_transitions()\n self.matrix = self.get_matrix()\n self.save_training()\n else:\n self.load_training()", "def define_training_data(self, train_sources, train_labels=None):\n logging.info(\"Defining training data for NNetModel...\")\n self.train_cols = []\n if train_labels is None:\n for source in train_sources:\n self.train_cols += self._read(source)\n else:\n for source, label in zip(train_sources, train_labels):\n self.train_cols += self._read(source, label)\n\n logging.info(\"NNetModel: Training data contains {} columns from {} sources\".format(len(self.train_cols), len(train_sources)))", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n\n if self.config[\"amp\"]:\n # AMP!\n with autocast():\n output = self.model(data)\n loss = self.criterion(output, target)\n else:\n output = self.model(data)\n loss = self.criterion(output, target)\n\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug(\n \"Train Epoch: {} {} Loss: {:.6f}\".format(\n epoch, self._progress(batch_idx), loss.item()\n )\n )\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{\"val_\" + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "def on_train_end(self, logs=None):", "def on_train_end(self, logs=None):", "def trainModel( self, featureTrain, classTrain):", "def data(self):\n (x_train, y_train), (_, _) = datasets.fashion_mnist.load_data()\n x_train = x_train.reshape((-1, 28, 28, 1))\n x_train, y_train = x_train.astype('float16') / 255.0, \\\n tf.keras.utils.to_categorical(y_train.astype('float16'), 10)\n (x_train, x_eval) = x_train[5000:], 
x_train[:5000]\n (y_train, y_eval) = y_train[5000:], y_train[:5000]\n train_data, eval_data = (x_train, y_train), (x_eval, y_eval)\n return train_data, eval_data", "def setup_training(self):\n self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)\n self.total_rewards = []\n self.rewards = []\n self.steps = []\n self.average_rewards = []\n self.average_steps = []\n self.model = initialize_model()\n self.invalid_actions = 0\n self.average_invalid_actions = []\n self.total_invalid_actions = []", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def train_init():\n np.random.seed(seed)\n tf.random.set_random_seed(seed)\n random.seed(seed)\n\n name = str(seed)\n desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)\n desc = recursive_creator(desc, 0, 0, seed)\n hypers = {}\n for hyper in hyps:\n hypers[hyper] = np.random.choice(hyps[hyper])\n\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, lr=hypers[\"lr\"], opt=hypers[\"opt\"], random_seed=seed)\n if intelligent_training == 2:\n loss_weights = model.sequential_training(hypers[\"btch_sz\"], iter_lim // 50, conv_param, proportion, iter_lim, display_step=-1)\n else:\n loss_weights = model.autoset_training(hypers[\"btch_sz\"], iter_lim//50, conv_param, proportion, iter_lim, display_step=-1, incr=incr, decr=decr, scaling=scale)\n\n\n # ####### Save model characteristics.\n\n model.descriptor.save(path=\"\")\n model.save_weights(path=\"\")\n\n results = evaluate_model(model)\n\n np.save(\"hypers\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", hypers)\n\n np.save(\"orig_results\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", results)\n\n np.save(\"loss_weights\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", loss_weights)", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target_seg, target_class) in enumerate(self.data_loader):\n data, target_seg, target_class = data.to(self.device), target_seg.to(self.device), target_class.to(self.device)\n\n self.optimizer.zero_grad()\n output_seg, output_class = self.model(data)\n loss = self.criterion((output_seg, output_class), target_seg, target_class, epoch)\n loss.backward()\n self.optimizer.step()\n\n 
self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item())\n for met in self.metric_ftns:\n if met.__name__ == \"accuracy\":\n self.train_metrics.update(met.__name__, met(output_class, target_class))\n else:\n self.train_metrics.update(met.__name__, met(output_seg, target_seg))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n\n self._visualize_input(data.cpu())\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n return log", "def on_train_begin(self, logs=None):\n pass", "def update_train_state(self):\n\n # Save one model at least\n if self.train_state['epoch_index'] == 0:\n # torch.save(self.classifier.state_dict(), self.train_state['model_filename'])\n self.save_model()\n self.train_state['stop_early'] = False\n\n # Save model if performance improved\n elif self.train_state['epoch_index'] >= 1:\n loss_tm1, loss_t = self.train_state['val_loss'][-2:]\n\n # If loss worsened\n if loss_t >= self.train_state['early_stopping_best_val']:\n # Update step\n self.train_state['early_stopping_step'] += 1\n # Loss decreased\n else:\n # Save the best model\n if loss_t < self.train_state['early_stopping_best_val']:\n self.save_model()\n self.train_state['early_stopping_best_val'] = loss_t\n\n # Reset early stopping step\n self.train_state['early_stopping_step'] = 0\n\n # Stop early ?\n self.train_state['stop_early'] = \\\n self.train_state['early_stopping_step'] >= self.args.early_stopping_criteria", "def on_train_begin(self, logs):\n self.train_start = timeit.default_timer()\n self.metrics_names = self.model.metrics_names\n print('Training for {} steps ...'.format(self.params['nb_steps']))", "def train(self, hyps):\n\n # Print Hyperparameters To Screen\n items = list(hyps.items())\n for k, v in sorted(items):\n print(k+\":\", v)\n\n # Make Save Files\n if \"save_folder\" in hyps:\n save_folder = hyps['save_folder']\n else:\n save_folder = \"./saved_data/\"\n\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n base_name = save_folder + hyps['exp_name']\n net_save_file = base_name+\"_net.p\"\n best_net_file = base_name+\"_best.p\"\n optim_save_file = base_name+\"_optim.p\"\n log_file = base_name+\"_log.txt\"\n if hyps['resume']: log = open(log_file, 'a')\n else: log = open(log_file, 'w')\n for k, v in sorted(items):\n log.write(k+\":\"+str(v)+\"\\n\")\n\n # Miscellaneous Variable Prep\n logger = Logger()\n shared_len = hyps['n_tsteps']*hyps['n_rollouts']\n env = gym.make(hyps['env_type'])\n obs = env.reset()\n prepped = hyps['preprocess'](obs)\n hyps['state_shape'] = [hyps['n_frame_stack']] + [*prepped.shape[1:]]\n if hyps['env_type'] == \"Pong-v0\":\n action_size = 3\n else:\n action_size = env.action_space.n*(hyps['env_type']!=\"Pong-v0\")\n hyps['action_shift'] = (4-action_size)*(hyps['env_type']==\"Pong-v0\") \n print(\"Obs Shape:,\",obs.shape)\n print(\"Prep Shape:,\",prepped.shape)\n print(\"State Shape:,\",hyps['state_shape'])\n print(\"Num Samples Per Update:\", shared_len)\n print(\"Samples Wasted in Update:\", shared_len % hyps['batch_size'])\n del env\n\n # Make Network\n net = hyps['model'](hyps['state_shape'],action_size,h_size=hyps['h_size'],bnorm=hyps['use_bnorm'])\n if hyps['resume']:\n 
net.load_state_dict(torch.load(net_save_file))\n base_net = copy.deepcopy(net)\n net = cuda_if(net)\n net.share_memory()\n base_net = cuda_if(base_net)\n\n # Prepare Shared Variables\n shared_data = {'states': cuda_if(torch.zeros(shared_len, *hyps['state_shape']).share_memory_()),\n 'rewards': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'deltas': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'dones': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'actions': torch.zeros(shared_len).long().share_memory_()}\n if net.is_recurrent:\n shared_data['h_states'] = cuda_if(torch.zeros(shared_len, hyps['h_size']).share_memory_())\n n_rollouts = hyps['n_rollouts']\n gate_q = mp.Queue(n_rollouts)\n stop_q = mp.Queue(n_rollouts)\n reward_q = mp.Queue(1)\n reward_q.put(-1)\n\n # Make Runners\n runners = []\n for i in range(hyps['n_envs']):\n runner = Runner(shared_data, hyps, gate_q, stop_q, reward_q)\n runners.append(runner)\n\n # Start Data Collection\n print(\"Making New Processes\")\n procs = []\n for i in range(len(runners)):\n proc = mp.Process(target=runners[i].run, args=(net,))\n procs.append(proc)\n proc.start()\n print(i, \"/\", len(runners), end='\\r')\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Make Updater\n updater = Updater(base_net, hyps)\n if hyps['resume']:\n updater.optim.load_state_dict(torch.load(optim_save_file))\n updater.optim.zero_grad()\n updater.net.train(mode=True)\n updater.net.req_grads(True)\n\n # Prepare Decay Precursors\n entr_coef_diff = hyps['entr_coef'] - hyps['entr_coef_low']\n epsilon_diff = hyps['epsilon'] - hyps['epsilon_low']\n lr_diff = hyps['lr'] - hyps['lr_low']\n\n # Training Loop\n past_rews = deque([0]*hyps['n_past_rews'])\n last_avg_rew = 0\n best_rew_diff = 0\n best_avg_rew = -1000\n epoch = 0\n T = 0\n while T < hyps['max_tsteps']:\n basetime = time.time()\n epoch += 1\n\n # Collect data\n for i in range(n_rollouts):\n stop_q.get()\n collection_time = time.time() - col_start_time\n\n T += shared_len\n\n # Reward Stats\n avg_reward = reward_q.get()\n reward_q.put(avg_reward)\n last_avg_rew = avg_reward\n if avg_reward > best_avg_rew:\n best_avg_rew = avg_reward\n updater.save_model(best_net_file, None)\n\n # Calculate the Loss and Update nets\n start_time = time.time()\n updater.update_model(shared_data)\n update_time = time.time() - start_time\n net.load_state_dict(updater.net.state_dict()) # update all collector nets\n \n # Resume Data Collection\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Decay HyperParameters\n if hyps['decay_eps']:\n updater.epsilon = (1-T/(hyps['max_tsteps']))*epsilon_diff + hyps['epsilon_low']\n print(\"New Eps:\", updater.epsilon)\n if hyps['decay_lr']:\n new_lr = (1-T/(hyps['max_tsteps']))*lr_diff + hyps['lr_low']\n updater.new_lr(new_lr)\n print(\"New lr:\", new_lr)\n if hyps['decay_entr']:\n updater.entr_coef = entr_coef_diff*(1-T/(hyps['max_tsteps']))+hyps['entr_coef_low']\n print(\"New Entr:\", updater.entr_coef)\n\n # Periodically save model\n if epoch % 10 == 0:\n updater.save_model(net_save_file, optim_save_file)\n\n # Print Epoch Data\n past_rews.popleft()\n past_rews.append(avg_reward)\n max_rew, min_rew = deque_maxmin(past_rews)\n updater.print_statistics()\n avg_action = shared_data['actions'].float().mean().item()\n print(\"Epoch\", epoch, \"– T =\", T)\n print(\"Grad Norm:\",float(updater.norm),\"– Avg Action:\",avg_action,\"– Best AvgRew:\",best_avg_rew)\n print(\"Avg Rew:\", avg_reward, \"– High:\", max_rew, \"– Low:\", 
min_rew, end='\\n')\n updater.log_statistics(log, T, avg_reward, avg_action, best_avg_rew)\n updater.info['AvgRew'] = avg_reward\n logger.append(updater.info, x_val=T)\n\n # Check for memory leaks\n gc.collect()\n max_mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print(\"Time:\", time.time()-basetime, \"– Collection:\", collection_time, \"– Update:\", update_time)\n if 'hyp_search_count' in hyps and hyps['hyp_search_count'] > 0 and hyps['search_id'] != None:\n print(\"Search:\", hyps['search_id'], \"/\", hyps['hyp_search_count'])\n print(\"Memory Used: {:.2f} memory\\n\".format(max_mem_used / 1024))\n\n logger.make_plots(base_name)\n log.write(\"\\nBestRew:\"+str(best_avg_rew))\n log.close()\n # Close processes\n for p in procs:\n p.terminate()\n return best_avg_rew", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' + str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def _store_feats(layer, inp, output):\n _model_feats.append(output.cpu().numpy())", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def on_train_begin(self, state: FuseManagerState) -> None:\n # File writer imports are done here in order to workaround the GPU issues -\n # when importing torch.tensorboard cuda gets occupied - do that only AFTER CUDA_VISIBLE_DEVICES is set\n try:\n # available only from torch 1.2\n from torch.utils.tensorboard import SummaryWriter\n self.writer_class = SummaryWriter\n self.use_summary_tf = False\n except ModuleNotFoundError:\n # fallback, use 
tensorflow file writer\n from tensorflow.summary import FileWriter\n import tensorflow as tf\n self.writer_class = FileWriter\n self.tf_summary = tf.Summary\n self.use_summary_tf = True\n\n tensorboard_train_dir = os.path.join(self.model_dir, 'train')\n tensorboard_validation_dir = os.path.join(self.model_dir, 'validation')\n\n # make sure we have these folders\n file.create_dir(tensorboard_train_dir, error_if_exist=False)\n file.create_dir(tensorboard_validation_dir, error_if_exist=False)\n\n # Get TensorBoard loggers\n self.tensorboard_logger_train = self.writer_class(tensorboard_train_dir)\n self.tensorboard_logger_validation = self.writer_class(tensorboard_validation_dir)\n pass", "def write_training_metrics(self) -> None:\n self.trainer_metrics.write_training_metrics()", "def train(self):\n raise NotImplementedError()", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def getTrainInstance(self): #NOTE: Probably faster way of doing this than additional 'if' statement every learning iteration\r\n return [self.currentTrainState, self.currentTrainPhenotype] #Return unadulterated training data\r", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def train(self, train_dataset, val_dataset):\n\n # check fine_tuning option\n model_path = os.path.join(self.check_point, 'model.pt')\n if self.fine_tune and not os.path.exists(model_path):\n raise Exception('Cannot find %s.' % model_path)\n elif self.fine_tune and os.path.exists(model_path):\n if self.verbose:\n print('Loading %s for finetuning.' % model_path)\n self.model = torch.load(model_path)\n '''\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n model_dict = self.model.state_dict()\n net_dict = net.state_dict()\n # 1. filter out unnecessary keys\n pretrained_dict = {k: v for k, v in net_dict.items() if k in model_dict}\n # 2. overwrite entries in the existing state dict\n model_dict.update(pretrained_dict)\n # 3. 
load the new state dict\n self.model.load_state_dict(model_dict)\n '''\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\n # capture best model\n best_val_psnr = -1\n best_psnr = -1\n best_model_state = self.model.state_dict()\n\n with open(os.path.join(self.check_point, 'PSNR' + '.txt'), 'w') as f:\n # Train the model\n for epoch in range(self.num_epochs):\n self._epoch_step(train_dataset, epoch)\n self.scheduler.step()\n\n if epoch % 10 == 0:\n if self.verbose:\n print('Computing PSNR...')\n\n # capture running PSNR on train and val dataset\n train_psnr, train_ssim, _, _, _ = self._check_PSNR(val_dataset)\n self.hist_train_psnr.append(train_psnr)\n\n f.write('epoch%d:\\t%.3f\\n' % (epoch, train_psnr))\n\n if self.verbose:\n print('Average train PSNR:%.3fdB average ssim: %.3f' % (train_psnr, train_ssim))\n print('')\n if best_psnr < train_psnr:\n best_psnr = train_psnr\n # write the model to hard-disk for testing\n if not os.path.exists(self.check_point):\n os.makedirs(self.check_point)\n model_path = os.path.join(self.check_point, 'model.pt')\n torch.save(self.model, model_path)\n print(' Best average psnr: %.3f' % (best_psnr))\n print('')", "def training_step(self, train_batch, batch_idx):\n x, y = train_batch\n logits = self.forward(x)\n loss = self.nll_loss(logits, y)\n logs = {\"train_loss\": loss}\n return {\"loss\": loss, \"log\": logs}", "def _on_train_begins(self, val):\n self.global_rmse.append(self._compute_rmse(self.ratings))\n header_string = '{} \\t | \\t {} \\t '.format('Iteration', 'RMSE')\n num_dashes = 40\n if val is not None:\n header_string += ' \\t | \\t {}'.format('Validation RMSE')\n self.validation_rmse.append(self._compute_rmse(val))\n num_dashes = 70\n print(num_dashes*'-')\n print(header_string)\n print(num_dashes*'-')" ]
[ "0.6854838", "0.6442424", "0.61368126", "0.607955", "0.5979329", "0.596864", "0.5946029", "0.5927915", "0.5915242", "0.59119564", "0.5895071", "0.58685416", "0.5860887", "0.5857673", "0.5857673", "0.5851298", "0.5837819", "0.5815968", "0.5808178", "0.5806393", "0.5806393", "0.5806393", "0.5806393", "0.5806393", "0.5780215", "0.57647705", "0.575587", "0.5745392", "0.5743193", "0.5718439", "0.57122564", "0.569964", "0.5699057", "0.56676483", "0.5661477", "0.56529504", "0.5644803", "0.56417984", "0.5637172", "0.56246656", "0.56181496", "0.5616431", "0.5608585", "0.5606795", "0.560604", "0.5600631", "0.5592014", "0.55916244", "0.55839264", "0.5583387", "0.55714315", "0.55654466", "0.5556322", "0.5556289", "0.5556098", "0.55559605", "0.55498946", "0.55470634", "0.5543623", "0.5539927", "0.55299866", "0.55285966", "0.5526999", "0.5526835", "0.5519217", "0.551586", "0.5514619", "0.5510283", "0.5509546", "0.5509285", "0.55086744", "0.5507009", "0.55045605", "0.55045605", "0.55025834", "0.5500038", "0.54972357", "0.54939944", "0.54923874", "0.5491016", "0.549084", "0.54823935", "0.54686594", "0.54655486", "0.54593515", "0.5458309", "0.5457279", "0.54565704", "0.54557985", "0.54557985", "0.54557985", "0.54557985", "0.5455564", "0.5451339", "0.54509795", "0.5449924", "0.5449019", "0.5447481", "0.5443145", "0.5438193", "0.54256326" ]
0.0
-1
Write JSON in a consistent, human-readable way.
def json_dumps(o: Any) -> bytes:
    return json.dumps(
        o, indent=4, sort_keys=True, ensure_ascii=True,
        separators=(",", ": "), cls=NumberEncoder
    ).encode("ascii")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self):\n self.json_o.write()", "def _write_json(self):\n with open(self._file_path, 'w') as f:\n json.dump(self._content, f, indent=4, separators=None,\n encoding='utf-8', sort_keys=False)", "def write(self, _filepath=None):\n if _filepath is None:\n _filepath = self.filepath\n _json_txt = json.dumps(self.json_dict, indent=2).splitlines()\n # json.dumps() puts a space bwetween :{ rF2 doesn't\n # So strip it out to make it easier to compare before and after\n _whitespace_removed = []\n for _line in _json_txt:\n _line = _line.replace(': {', ':{', 1)\n\n # For some reason rF2 escapes / in values\n _colon = _line.find(':')\n if _colon:\n _line = _line[:_colon] + _line[_colon:].replace('/', r'\\/')\n _whitespace_removed.append(_line)\n _json_txt = '\\n'.join(_whitespace_removed)\n\n super()._write_json_text(_json_txt, _filepath)", "def write_json(self, json_dict):\n content = json.dumps(json_dict)\n\n if isinstance(content, unicode):\n content = content.encode('utf-8')\n\n self.wfile.write(content)", "def write(self, _filepath=None):\n _json_txt = json.dumps(self.json_dict, indent=2)\n self._write_json_text(_json_txt, _filepath)", "def write_json(fd, data, indent=DEFAULT_JSON_INDENT):\n print(json.dumps(data, indent=indent), file=fd)", "def _write_json(fname, dictionary, overwrite=False, verbose=False):\n if op.exists(fname) and not overwrite:\n raise FileExistsError(f'\"{fname}\" already exists. '\n 'Please set overwrite to True.')\n\n json_output = json.dumps(dictionary, indent=4)\n with open(fname, 'w') as fid:\n fid.write(json_output)\n fid.write('\\n')\n\n if verbose is True:\n print(os.linesep + f\"Writing '{fname}'...\" + os.linesep)\n print(json_output)", "def write_json(self, jsonfile):\n with open(jsonfile, 'w') as fp:\n json.dump(self.status, fp, sort_keys=True, indent=4)\n fp.close()", "def write_json(self, f, **kw_json):\n wntr.network.io.write_json(self, f, **kw_json)", "def write_json(obj_to_write: Any, filename: str):\n \n with open(filename, 'w') as json_file:\n json.dump(obj_to_write, json_file, indent=4)", "def write_json(self, json_obj, key):\n self.put_object(json.dumps(json_obj), key)", "def write_json_to_disk(json_data, start, end):\n\tfilename = \"vote_data_\" + start + \"_\" + end\n\twith open(filename, 'w') as outfile:\n\t\tujson.dump(json_data, outfile, encode_html_chars=False, escape_forward_slashes=False, ensure_ascii=False) #Write JSON to data.json (disk)", "def write_json(toWrite):\n with open('clubs.json', 'w+') as outfile:\n json.dump(toWrite, outfile)", "def standard_json_out(json_asset, output_filename, options=None):\n\n indent = 0\n if options is not None:\n indent = options.json_indent\n\n metrics = False\n if options is not None:\n metrics = options.metrics\n\n json_asset.clean()\n if metrics:\n json_asset.log_metrics()\n\n with open(output_filename, 'w') as target:\n json_asset.json_to_file(target, True, indent)\n target.write('\\n')", "def _write_json(\n output_path, records\n):\n output_path.write_text(json.dumps(records))", "def json_write(path, dictionary):\n json_output = json.dumps(dictionary, indent=4)\n with open(path, \"w\", encoding=\"utf-8\") as f:\n f.write(json_output)", "def write_json_to_file(json_data: list, filename: str) -> None:\n print('Saving json data to ' + filename + '... 
', end='', flush=True)\n with open(filename, 'w') as fd:\n json.dump(obj=json_data, fp=fd, ensure_ascii=False, indent=2)\n print('Done.')\n return", "def write(self):\r\n\r\n with open(self.filename + \".json\", mode='w') as json_file:\r\n json.dump(self.data, json_file, separators=(',', ':'))", "def write_json(self, dictionary=None):\n if not dictionary:\n dictionary = self.to_dict()\n prefix = ''\n if self._interactive:\n prefix = '.interactive'\n file_json = open(self.get_file_basename() + prefix + '.json', 'w')\n file_json.write(str(dictionary))\n file_json.close()", "async def write_json(self, data) -> None:\n print(f\"Sending: {data}\")\n await self.write(json.dumps(data, separators=(\",\", \":\")))", "def _json_write(filename, res):\n with open(filename, 'w+') as file:\n return json.dump(res, file)", "def write_json(data, file_name):\n try:\n json.dump(data, codecs.open(file_name, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4)\n return True\n except TypeError:\n print(\"No valid JSON data!\")\n raise\n except IOError:\n print(\"Could not write file to disk!\")\n raise", "def write_to_json(dictData, fileout):\n\t# Prepare the output file\n\tfout = codecs.open(fileout, 'w', 'utf-8')\n\thwDict = prepare_hw_dict(dictData)\n\tjson.dump(hwDict, fout)\n\t# Give some summary to the user\n\tprint('JSON generated. Success!')\n\tprint('{} headwords written to JSON file.'.format(len(hwDict)))", "def write_json(data, filepath):\n with open(filepath, \"w\") as f:\n content = json.dumps(data, indent=3)\n f.write(content + '\\n')", "def write_json(dictionary, filename):\r\n with open(filename, 'w') as data_file:\r\n json.dump(dictionary, data_file, indent=4, sort_keys=True)\r\n print('--> Wrote ' + os.path.basename(filename))", "def save_json(self, data, json_path=None):\n if json_path is None:\n json_path = self.json_path\n with open(json_path, encoding='utf-8', mode='w') as f:\n json.dump(data, f, indent=4, ensure_ascii=False)\n\n print(json_path)", "def write_json_file(self, fname, content):\n pass", "def write_json_file(self, fname, content):\n pass", "def write_json_data(data, filename):\n with open(filename, 'w+', encoding='utf-8') as f:\n json.dump(data, f, indent=4, ensure_ascii=False)\n f.close()", "def write_json_file(jsonfile, data):\n json_str = json.dumps(data, indent=2)\n with jsonfile.open('w') as conf_file:\n conf_file.write(json_str)", "def write(self, output):\n with open(output, 'w') as out:\n out.write(json.dumps(self, indent=4))", "def write_json_to_file(json_object, filename):\n try:\n # Try to serialize it before writing\n json_object = json.dumps(json_object)\n except TypeError:\n print(\"Failed to serialize the object\")\n try:\n json_object = json.loads(json_object)\n json_object = json.dumps(json_object)\n except TypeError:\n print(\"Failed secondary serialization of json object\")\n\n json_file = robot_dir + \"/output/original/{}_orig.json\".format(filename.replace(' ', ''))\n with open(json_file, 'w') as json_orig_file:\n json_orig_file.writelines(json_object)", "def write_json(obj, fpath):\n mkdir_if_missing(osp.dirname(fpath))\n with open(fpath, 'w', encoding='utf-8') as f:\n json.dump(obj, f, indent=4, separators=(',', ': '), ensure_ascii=False) # 添加中文支持", "def json_writer():\n with open(\"{}.json\".format(sys.argv[3]), \"w+\") as new_json:\n print(\"uploading the jason file... 
\")\n json.dump(json_file, new_json)\n print(\"file is done\")", "def _write_manifest_json(self, json_to_write):\n with open(os.path.join(self._crx_dir, \"manifest.json\"), \"wb\") as manifest:\n json.dump(json_to_write, manifest)", "def write_json(filepath, data):\n\n with open(filepath, 'w', encoding='utf-8') as file_obj:\n json.dump(data, file_obj, ensure_ascii=False, indent=2)", "def write_json(dict_to_write, path, file_name):\n with open(path + \"/\" + file_name + \".json\", \"w\") as fp:\n json.dump(dict_to_write, fp)\n return None", "def write_json(self, obj, mode='wb', **kwargs):\n with self.open(mode) as f:\n return json.dump(obj, f, **kwargs)", "def save_json_to_destination_file(self):\n if self.source_file != self.output_file:\n click.secho(f'Saving output JSON file to {self.output_file}', fg='white')\n with open(self.output_file, 'w') as file:\n ujson.dump(self.data, file, indent=4, encode_html_chars=True, escape_forward_slashes=False,\n ensure_ascii=False)", "def write_json_file(self, file, content):\n with open(file, \"w\", encoding=\"utf-8\") as f:\n json.dump(content, f, indent=2)", "def write_to_json(data):\n # Serializing json \n json_object = json.dumps(data, indent = 4) \n\n date = datetime.now()\n date_str = date.strftime(\"%y%m%d_%H%M\")\n \n # Writing to sample.json \n with open(\"./intraday_data/json/i_data_{0}_{1}.json\".format(date_str, int(time.time())), \"w\") as outfile: \n outfile.write(json_object)", "def writeToJson(inputObj,fileLoc):\n myFile = open(fileLoc,'w')\n json.dump(inputObj, myFile, sort_keys=True, indent=4, separators=(',', ': '))", "def write_json(json_file, in_dict):\n with open(json_file, \"w+\") as fp:\n json.dump(in_dict, fp, indent=4)", "def write_to_json(data, name):\n f = open(name + \".json\", 'w')\n json.dump(data, f, indent = 4, sort_keys = True, default = encode_json)\n f.close()", "def json_write(data, path, **kwargs):\n with open(_fsdecode(path), 'wt') as file:\n _json_dump(data, file, **kwargs)", "def write_json_user(self, response):\n\n name = str(self.get_json_cursor) + \".json\"\n dst_path = os.path.join(self.get_json_folder_path, name)\n self.write_json(name, dst_path, response)", "def saveJsonToFile(fullFilename, jsonValue, indent=2, fileEncoding=\"utf-8\"):\n with codecs.open(fullFilename, 'w', encoding=fileEncoding) as jsonFp:\n json.dump(jsonValue, jsonFp, indent=indent, ensure_ascii=False)\n # logging.debug(\"Complete save json %s\", fullFilename)", "def write(self, fp, **kwds):\n json.dump(self._dict, fp)", "def writeJSON(self, name, contents, dynamic=True):\n if dynamic:\n timestep = hoomd.context.current.system.getCurrentTimeStep()\n else:\n timestep = -1\n\n self.cpp_analyzer.writeStr(name, json.dumps(contents), timestep)", "def write(self, stream, root, order):\n json.dump(root, stream, indent=2)\n stream.write('\\n')", "def to_json_file(self, json_file_path):\n with open(json_file_path, 'w', encoding='utf-8') as writer:\n writer.write(self.to_json_string())", "def writeToFile(jsonList):\n file = open(\"data.json\", \"w\")\n file.write(json.dumps(jsonList, sort_keys=True, indent=4, separators=(',', ': ')))\n print(\"File written\")", "def to_json_file(self, json_file_path):\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())", "def save_json(data):\n data = json.dumps(data)\n\n with MEDTADATA_FILE.open('w') as outfile:\n outfile.write(data)", "def write_json(\n data: Any,\n filepath: types.PathLike,\n *,\n mode: str = \"wt\",\n encoding: Optional[str] = None,\n 
make_dirs: bool = False,\n lines: bool = False,\n ensure_ascii: bool = False,\n separators: tuple[str, str] = (\",\", \":\"),\n sort_keys: bool = False,\n indent: Optional[int | str] = None,\n) -> None:\n io_utils._validate_write_mode(mode)\n with io_utils.open_sesame(\n filepath, mode=mode, encoding=encoding, make_dirs=make_dirs\n ) as f:\n if lines is False:\n f.write(\n json.dumps(\n data,\n indent=indent,\n ensure_ascii=ensure_ascii,\n separators=separators,\n sort_keys=sort_keys,\n cls=ExtendedJSONEncoder,\n )\n )\n else:\n newline: Union[str, bytes] = \"\\n\" if \"t\" in mode else b\"\\n\"\n for item in data:\n f.write(\n json.dumps(\n item,\n indent=indent,\n ensure_ascii=ensure_ascii,\n separators=separators,\n sort_keys=sort_keys,\n cls=ExtendedJSONEncoder,\n )\n + newline\n )", "def write_json(fname, data, check_extension=False):\n if fname is None:\n raise ValueError(\"File name is None\")\n if check_extension:\n IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))\n IOUtils.mkdirf(fname)\n with OpenFile(fname, 'w') as fobj:\n json.dump(data, fobj, indent=4)", "def save_json(self, file: Union[str, TextIO]) -> None:\n if hasattr(file, 'write'):\n file_ctx = nullcontext(file)\n else:\n file_ctx = open(file, 'w')\n\n with file_ctx as fp:\n for d in self:\n json.dump(d.dict(), fp)\n fp.write('\\n')", "def test_write_to_json():\r\n tmp_dir = os.getcwd()\r\n json_content = '{ \"name\":\"John\", \"age\":30}'\r\n directory = os.path.join(tmp_dir, 'inputspec.json')\r\n write_to_json(directory, json_content) \r\n with open(directory) as json_file:\r\n data = json.load(json_file)\r\n json_string = json.dumps(data)\r\n if os.path.exists(directory):\r\n os.remove(directory)\r\n assert json_string.replace(' ', '') == json_content.replace(' ' , '')", "def write_json_to_file(directory_string, json_content):\n with open(directory_string, \"w\") as file:\n json.dump(json_content, file, default = datetime_converter, indent=4)\n file.close()", "def save_as_json(self, json_name):\r\n with open(json_name, \"w\") as outfile:\r\n json.dump(self.pet_file, outfile) # save create json file with current information\r\n self.pet_file_name = json_name # set name to passed name\r", "def writeJsonFile(filename, data):\n try:\n with open(filename, 'w') as jsonfile:\n json.dump(data, jsonfile, indent=0, sort_keys=True)\n except IOError:\n print(\"Error writing to json file %s\" % filename)", "def save_json(self, file):\n with open(file, 'w', encoding='utf8') as f:\n json.dump(self, f, ensure_ascii=False)", "def json_dump(data):\n if OLD_SUGAR_SYSTEM is True:\n return json.write(data)\n else:\n _io = StringIO()\n jdump(data, _io)\n return _io.getvalue()", "def write(self):\n out = json.dumps({\"items\": self.items})\n sys.stdout.write(out)", "def write_to_json(missense_dict, frame_shift_dict, missense_name_dict, frame_shift_name_dict, person):\n json_file[person] = {\n \"missense_variant\": missense_dict,\n \"missense_HGNC_name\": missense_name_dict,\n \"frame_shift_variant\": frame_shift_dict,\n \"frame_shift_HGNC_name\": frame_shift_name_dict}", "def to_json(self, json_file):\n try:\n json.dump(self.container, open(json_file, 'w'), indent=4)\n except (FileNotFoundError, IOError) as err:\n print(err)", "def write_json_file(json_data, filename):\n try:\n str_data = json.dumps(json_data)\n with open(filename, \"w\") as f:\n f.write(str_data)\n return True\n except MemoryError:\n return False", "def write(obj):\n import warnings\n warnings.warn(\"simplejson.dumps(s) should be used instead of write(s)\",\n 
DeprecationWarning)\n return dumps(obj)", "def logjson(self, data):\n\n if not self.enabled:\n return\n\n dumps = json.dumps(\n data,\n indent=4,\n default=lambda obj: str(type(obj)),\n ensure_ascii=False\n )\n\n try:\n print(f\"```json\\n{dumps}\\n```\", file=self.fp)\n except UnicodeEncodeError:\n print(\"(encoding error occured here.)\", file=self.fp)", "def write_to_json(path: str, content: dict) -> None:\n with open(path, 'w') as f:\n f.write(json.dumps(content, ensure_ascii=False))", "def write_json_file(dictionary, path):\n path_to_save = path + \"/changes.json\"\n file = open(path_to_save, 'w+')\n file.write(json.dumps(dictionary, sort_keys=True, indent=4))\n file.close()\n return", "def write_json(self, filename):\n with open(filename, 'a+') as f:\n f.write(json.dumps(self.weights))\n f.write(\"\\n\")", "def save_json(file_name, file_content):\n with open(generate_file_path(\"output\", file_name), 'w', encoding='utf-8') as f:\n json.dump(file_content, f, ensure_ascii=False, indent=4)", "def output_json_file(dict, output_file):\n output_dir = os.path.dirname(output_file)\n # make output dir\n not os.path.exists(output_dir) and os.makedirs(output_dir)\n # change output dir mod\n os.chmod(output_dir, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH) # mode:777\n\n # write json file\n with open(output_file, 'w') as outfile:\n outfile.write(json.dumps(dict))", "def write(self, content: Union[str, list, dict]):\n with open(self._path, 'w') as f:\n if isinstance(content, str):\n f.write(content)\n else:\n json.dump(content, fp=f)", "def write_json(self, outputfile):\n outputfile.write(json.dumps(self.translations,\n sort_keys=True, indent=4))", "def save_to_file(data):\n\ttry:\n\t\toutput_file = open(\"output.json\", \"w\")\n\t\toutput_file.write(json.dumps(data))\n\texcept:\n\t print(Fore.GREEN + \"File not found or path is incorrect\")\n\tfinally:\n\t print(Fore.GREEN + \"Success go to output.json to look at the json\")", "def to_json_file(self, json_file: str = None) -> None:\n\n if self.json:\n if not json_file:\n json_file = f\"{self.id}.json\"\n\n with open(json_file, \"w\") as f:\n f.write(self.json)", "def write_as_json(filename, data):\n if not os.path.exists(os.path.dirname(OUT_DIR + filename)):\n print('creating ...')\n os.makedirs(os.path.dirname(OUT_DIR + filename))\n\n with open(OUT_DIR + filename, \"w\") as f:\n json.dump(data, f)", "def write2json(output, in_data):\n print(\"Writeing \" + output)\n with open(output, 'w') as f:\n json.dump(in_data, f, indent=4, sort_keys=True)", "def _jsonPretty(j):\n return json.dumps(j, sort_keys=True, indent=4, separators=(',', ': '))", "def dump_json(json_file, json_doc, per_line, beg=True, end=True):\n\n with open(json_file, 'a') as jsf:\n if per_line:\n jsf.write(\n '\\n'.join(json.dumps(i) for i in json_doc) + '\\n'\n )\n else:\n if beg:\n jsf.write('[\\n')\n jsf.write(\n ',\\n'.join(json.dumps(i) for i in json_doc)\n )\n if end:\n jsf.write('\\n]')\n else:\n jsf.write(',\\n')", "def store_json(data, filename):\n with open(filename, 'w') as fileout:\n fileout.write(json.dumps(data, separators=(', \\n', ': ')))", "def dump_json(json_data, file_name_template, record_id, content_description):\n file_name = file_name_template % (record_id)\n script_logging.log_status('Storing %s JSON to %s' % (content_description, file_name))\n with open(file_name, 'w') as f:\n json.dump(json_data, f, indent = 2)", "def write_tojson(data, filename) -> None:\r\n with open(\"static/json/\" + filename, \"w\") as out:\r\n out.write(\r\n json.dumps(\r\n 
[data[datum].__dict__() for datum in data]\r\n )\r\n )", "def dump(data, filename):\n with io.open(filename, mode='w', encoding='utf-8') as file:\n json.dump(\n data,\n file,\n encoding='utf-8',\n ensure_ascii=False,\n sort_keys=True,\n indent=4\n )", "def save_json(filename: str, data: dict):\n filepath = get_file_path(filename)\n with open(filepath, mode=\"w+\", encoding=\"UTF-8\") as f:\n json.dump(\n data,\n f,\n indent=4,\n ensure_ascii=False\n )", "def write_json_file(data, filename, dictionary=False):\n try:\n if dictionary:\n with open(filename, \"wb\") as outfile:\n json.dump(data, outfile)\n else:\n with open(filename, \"w\") as outfile:\n json.dump(data, outfile, indent=2)\n return True\n except OSError:\n return False", "def write_output(version, aliases, zones, filename):\n data = OrderedDict()\n data[\"version\"] = version\n data[\"aliases\"] = OrderedDict(sorted(aliases.items()))\n data[\"zones\"] = OrderedDict(sorted(zones.items()))\n\n with open(filename, \"w\") as jsonfile:\n json.dump(data, jsonfile, indent=2, separators=(\",\", \": \"))\n jsonfile.write(\"\\n\")", "def save_json_to_file(i):\n\n import json\n import ck.strings\n\n fn = i['json_file']\n\n if i.get('safe', '') == 'yes':\n d = i['dict']\n\n sd = {}\n\n # Check main unprintable keys\n for k in d:\n try:\n json.dumps(d[k])\n except Exception as e:\n pass\n else:\n sd[k] = d[k]\n\n i['dict'] = sd\n\n r = ck.strings.dump_json(i)\n if r['return'] > 0:\n return r\n s = r['string'].replace('\\r', '')+'\\n'\n\n return save_text_file({'text_file': fn, 'string': s})", "def write_json(filepath: str, data, overwrite: bool = True, encoder: Optional[json.JSONEncoder] = None):\n if not filepath:\n raise ValueError\n if os.path.isfile(filepath) and not overwrite:\n raise FileExistsError\n if data is None:\n raise ValueError\n with open(filepath, 'w') as f:\n json.dump(data, f, cls=encoder, indent=2)", "def write_to_file(file_name, json_data):\r\n in_tests.test_write_to_file_file_name(file_name)\r\n in_tests.test_var_type(json_data, \"json_data\", (dict, list))\r\n print(f\" Writing json to `{file_name}`...\")\r\n\r\n try:\r\n Path(file_name).parent.mkdir(parents=True, exist_ok=True)\r\n except PermissionError:\r\n print (f\"I don't have permission to create {file_name}.\\n\\\r\nTry to change {file_name} var value in `config.yaml` file or just solve this.\")\r\n with open (file_name, \"w\", encoding=\"utf8\") as f:\r\n f.write(json.dumps(json_data, indent=4, ensure_ascii=False))\r\n out_tests.test_is_file_exists(file_name)\r\n return ()", "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent = 4)", "def save_to_json(dict, file):\n with open(file, 'w') as f:\n json.dump(dict, f, indent=2)\n return file", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent = 2, sort_keys = True) + \"\\n\"", "def write_in_json(data):\n with open('genre.json', 'w') as data_file:\n json.dump(data, data_file, indent= 4)", "def export_json(meta, data, output):\n\n formatted = { k: list(v.values()) for k, v in data.items() }\n output_file = open(output, 'w')\n output_file.write(json.dumps(formatted, cls=JSONEncoder))\n output_file.close()", "def write_json_conf(json_dict, path):\n try:\n config_json_open = io.open(path, encoding=\"utf-8\", mode=\"w\")\n config_json_open.write(unicode(json.dumps(json_dict, indent=True, sort_keys=True)))\n config_json_open.close()\n except IOError:\n log.fatal(\"couldn't write {0}\".format(path))", "def write_to_json(self, file_name, jayson, mode='w', 
prep_for_BQ=False):\n if prep_for_BQ == True:\n jayson = self.fix_json_keys(jayson, self.prep_json_for_BQ_callback)\n\n with open(file_name, mode) as f:\n for line in jayson:\n f.writelines(json.dumps(line) + '\\n')", "def write_json_in_file(filename, data, encoding=\"utf-8\"):\n with open(filename, \"w\", encoding=encoding) as f:\n f.write(json.dumps(data))", "def write_to_file(path, data):\n with open(path, 'w') as outfile:\n json.dump(data, outfile)" ]
[ "0.73995405", "0.73701173", "0.7319848", "0.7299197", "0.7268811", "0.71151686", "0.7094368", "0.6901286", "0.6872474", "0.6859761", "0.68393916", "0.67960197", "0.67669374", "0.6747494", "0.6735324", "0.67135006", "0.6681034", "0.6619973", "0.66117257", "0.66088766", "0.6588749", "0.65773696", "0.6557792", "0.65369034", "0.6528617", "0.6525587", "0.65137386", "0.65137386", "0.65120006", "0.6505508", "0.65010595", "0.64953846", "0.6485844", "0.6470417", "0.6465185", "0.64542186", "0.64483", "0.6447556", "0.64473", "0.64325905", "0.64199746", "0.64174336", "0.6417049", "0.6404875", "0.6391928", "0.6389856", "0.6388506", "0.63834983", "0.6376138", "0.63636786", "0.63593185", "0.6358255", "0.63570464", "0.6343868", "0.633796", "0.63378143", "0.63285846", "0.63271546", "0.6315414", "0.63129294", "0.63034856", "0.62964034", "0.6289537", "0.62750584", "0.6269771", "0.62501746", "0.6238886", "0.62337154", "0.623276", "0.6231958", "0.6230973", "0.6229883", "0.6226096", "0.6222824", "0.62205184", "0.62128997", "0.6207726", "0.6204135", "0.6201398", "0.6200144", "0.61989796", "0.6195586", "0.61916125", "0.6182608", "0.61756253", "0.6162543", "0.6152702", "0.61500067", "0.6130321", "0.6127426", "0.6118534", "0.6095457", "0.60863405", "0.6083369", "0.6077866", "0.60732174", "0.6067666", "0.6066331", "0.60606545", "0.6053356", "0.60524166" ]
0.0
-1
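A minimal usage sketch for json_dumps, assuming NumberEncoder is some json.JSONEncoder subclass (a plain pass-through subclass stands in for it here); sorted keys, fixed separators and ASCII output make the encoded bytes stable across runs.

import json
from typing import Any

class NumberEncoder(json.JSONEncoder):
    # stand-in for the encoder assumed by json_dumps; defers to the default behaviour
    def default(self, o: Any) -> Any:
        return super().default(o)

def json_dumps(o: Any) -> bytes:
    return json.dumps(
        o, indent=4, sort_keys=True, ensure_ascii=True,
        separators=(",", ": "), cls=NumberEncoder
    ).encode("ascii")

# keys come back sorted, indented and ASCII-escaped, so the bytes are reproducible
print(json_dumps({"b": 1, "a": "é"}).decode("ascii"))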
Read JSON in a consistent way.
def json_loads(s: Union[bytes, str]) -> Dict[str, Any]:
    return json.loads(ensure_text(s, "utf-8"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_json(self):\n self._fopen.seek(self._json_start, 0)\n return json.loads(self._fopen.read().decode('utf-8'))", "def read_json(self, *args, **kwargs):\n with self.open('rb') as f:\n return json.load(f, *args, **kwargs)", "def reading_json(json_file):\n with open(json_file, encoding=\"utf-8\") as file:\n r_json = json.load(file)\n return r_json", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def read_json():\n json_path = Path.home() / Path(\"pdf2notion.json\")\n if json_path.exists():\n try:\n with open(json_path) as f:\n json_data = json.load(f)\n return json_data\n except json.decoder.JSONDecodeError as e:\n print(e)\n print(type(e))", "def _locked_json_read(self):\n assert self._thread_lock.locked()\n self._file.file_handle().seek(0)\n return json.load(self._file.file_handle())", "def read_json(fullpath):\n import json\n \n with open(fullpath, \"r\") as json_file_readed:\n json_readed = json.load(json_file_readed)\n\n return json_readed", "def _json_read(filename):\n with open(filename) as file:\n return json.load(file)", "def readjson(path):\n\twith open(path, 'r') as file:\n\t\treturn json.load(file)", "def read_json(fn):\n with open(fn) as f:\n return json.load(f, object_hook=_operator_object_hook)", "def read_json(jsonfp):\n with JsonFile(jsonfp) as jsondata:\n return jsondata", "def _read_json(self, filename):\n path = os.path.join(self.script_path, filename)\n try:\n with open(path) as file:\n return json.load(file)\n except Exception as exception:\n print('Error parsing {}: {}'.format(path, str(exception)))\n raise", "def read_json(f):\n with open(f, \"r\") as fin:\n return json.loads(fin.read())", "def read_json(file_name):\n try:\n with open(file_name, \"rt\") as input_file:\n return json.loads(input_file.read())\n except TypeError:\n print(\"No valid JSON data!\")\n raise\n except IOError:\n print(\"Could not read file from disk!\")\n raise", "def read_json(self, key):\n return json.loads(self.get_object(key))", "def read_json(json_file):\n with open(json_file, \"r\") as fp:\n data = json.load(fp)\n return data", "def read_json(json_file):\n with open(json_file) as f:\n content = json.load(f)\n return(content)", "def _read_json(filename):\n with open(filename) as f:\n import json\n return json.load(f)", "def read_json(path):\n with open(path, 'r') as f:\n return json.load(f)", "def _read_metadata(self):\n self._wait_for_read_with_timeout(self.metadata_read_fd)\n flat_json = os.read(self.metadata_read_fd, MAX_METADATA_SIZE)\n os.close(self.metadata_read_fd)\n try:\n return json.loads(flat_json)\n except ValueError:\n self.logger.exception('Failed to load metadata from json')\n raise StorletRuntimeException('Got invalid format about metadata')", "def read_json_file(jsonfile):\n with jsonfile.open('r') as cfile:\n return json.loads(cfile.read())", "def _read_json(environment, call):\n fixture = load_fixture(f\"plugwise/{environment}/{call}.json\")\n return jsonpickle.decode(fixture)", "def _read_json_file(self):\n with open(self.subcfgfilename) as json_file:\n json_string = json_file.read()\n json_data = json.loads(json_string)\n return(json_data)", "def test_read_json(self, magic_0, magic_1):\n expected = {\n 'key_1': [1, 2, 3, 4, 5],\n 'key_2': ['a', 'b', 
'c', 'd', 'e']\n }\n result = helpers.read_json(r\"path\")\n self.assertEqual(expected, result)", "def openJson(self):\n json_file = open(self.file, 'r')\n json_data = json_file.read()\n result = json.loads(json_data)\n return result", "def read_json_document(json_file_name):\n\n with open(json_file_name) as fd:\n data = json.load(fd)\n\n return data", "def read_json(fpath):\n with open(fpath, 'r') as f:\n obj = json.load(f)\n return obj", "def read(self):\n with open(self._path, 'r') as f:\n content = os.linesep.join(f.readlines())\n try:\n return json.loads(content)\n except json.JSONDecodeError:\n return content", "def read(self, filepath, dirpath=None):\n try:\n #filepath = os.path.normpath(filepath)\n with open(filepath) as f_p:\n try:\n self.json_dict = json.load(f_p)\n self.filepath = filepath\n return self.json_dict\n except ValueError as err:\n print('JSON content error in \"%s\"' % filepath)\n print(err)\n except (IOError, FileNotFoundError):\n print(\n 'Failed to open JSON file \"%s\" \"%s\"' %\n (os.path.abspath(''), filepath))\n raise NoSuchFileError(filepath)\n raise JsonContentError", "def read_json(self, json_name):\r\n with open(json_name, 'r') as infile:\r\n self.pet_file = json.load(infile) # load existing json file\r\n self.pet_file_name = json_name # set name to passed name\r", "def load_json(self):\n\n self.load_json_str(self.get_json_str())", "def read_json(filename):\n # Implement this function\n file = open(filename)\n text = file.read()\n result = json.loads(text)\n file.close()\n return result", "def read_json(file):\n with open(file, \"r\") as fid:\n return json.load(fid)", "def read_json(json_file):\n with open(json_file) as schema:\n val = json.load(schema)\n\n return val", "def read_json(fname, check_extension=False):\n if fname is None:\n return None\n if check_extension:\n IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))\n with OpenFile(fname, 'r') as fobj:\n return json.load(fobj)", "def readJSON(filename):\n try:\n with open(filename) as f:\n for line in f:\n data = json.loads(line.strip())\n except:\n print \"Failed to read data!\"\n return []\n print \"The json file has been successfully read!\"\n return data", "def read_json(fname):\n with open(fname) as f:\n d = json.load(f)\n return d", "def read_json(self, stock_name):\n with open(f\"{self.json_path}/{stock_name}.json\") as json_file:\n json_data = json.load(json_file)\n if self.debug:\n print(f\" JsonHelper.read_json() --> read {self.json_path}/{stock_name}.json success\")\n return json_data", "def read_from_json(json_path):\n\n # Read data\n with open(json_path, 'r') as json_file:\n data = json.load(json_file)\n \n return data", "def read_json(path):\n with open(path) as json_file:\n data = json.load(json_file)\n return data", "def read(self,json_data):\n read_path = (DATA_NODE_DIR % (self.server_id,)) + os.path.sep + json_data['command']['read']['chunk']\n with open(read_path, 'r') as f_in:\n f_in.seek(json_data['command']['read']['offset'])\n content = f_in.read(json_data['command']['read']['count'])\n print(content)\n return {'command':'read','finish':1}", "def _raw_read(self, filepath, dirpath=None):\n self.json_dict = super().read(filepath)\n return self.json_dict", "def util_load_json(path):\n with io.open(path, mode=\"r\", encoding=\"utf-8\") as f:\n return json.loads(f.read())", "def read_json_file_to_be_edited(self):\n _json_file = self.job[\"JSONfileToBeEdited\"]\n if _json_file in self.config:\n # Substitute the path defined in the macro\n _json_file = 
self.config[_json_file]\n self.json_o.read(_json_file)", "def read_json_file(json_file, project_logger, json_not_found_error_code, json_bad_format_error_code):\n try:\n # Open the file\n with open(json_file) as F:\n # Read the file's contents as a string\n json_str = F.read()\n # Return the data as a Python dictionary\n return json.loads(json_str)\n except FileNotFoundError:\n project_logger.log_error(\n json_not_found_error_code,\n 'Could not open json file \"' + str(json_file) + '\": JSON file could not be found'\n )\n sys.exit(1)\n except json.decoder.JSONDecodeError:\n project_logger.log_error(\n json_bad_format_error_code,\n 'Could not open json file \"' + str(json_file) + '\": JSON file not formatted properly'\n )\n sys.exit(1)", "def read_json(filepath):\n return json.loads(open(filepath).read())", "def stream_read_json(f):\n\n end_symbol = bytes(']', 'utf-8')\n start_pos = 1\n while True:\n try:\n obj = json.load(f)\n yield obj\n return\n except json.JSONDecodeError as e:\n f.seek(start_pos)\n json_str = f.read(e.pos)\n obj = json.loads(json_str)\n start_pos += e.pos + 1\n a = f.read(1)\n if a == end_symbol:\n yield obj\n return\n yield obj", "def read_json(filepath):\n if (filepath in _json_cache):\n return _json_cache[filepath]\n with open(filepath, 'r', encoding='utf-8') as fileinfo:\n data = json.load(fileinfo)\n _json_cache[filepath] = data\n return data", "def read_json_breakdown(cls, fname):\n if not os.path.exists(fname):\n raise RuntimeError\n\n with open(fname, 'r') as data_file:\n return cls.fixup_from_json(data_file.read())", "def read_json():\n global key_data\n with open(USER_JSONFILE) as fobj:\n key_data = json.load(fobj)", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def example_json42(example_json_file42):\n return json.loads(example_json_file42)", "def example_json41(example_json_file41):\n return json.loads(example_json_file41)", "def read_json():\n return json.loads(web.data())", "def process_json(path):\n path = os.path.abspath(os.path.expanduser(path))\n try:\n with open(path) as f:\n return json.load(f, object_hook=ascii_encode_dict)\n except ValueError as e:\n logging.error(\"File: %s\\nInvalid JSON:\\n%s\", str(path), str(e))\n raise\n except IOError as io:\n logging.error(\"Provided json file path does not exist %s\", str(path))\n raise", "def read_json(filename):\r\n\r\n with open(filename, 'r') as data_file:\r\n return json.load(data_file)", "def example_json43(example_json_file43):\n return json.loads(example_json_file43)", "def read_json(self, f, **kw_json):\n return wntr.network.io.read_json(f, append=self, **kw_json)", "def read_json():\n try:\n rospack = rospkg.RosPack()\n file_path = rospack.get_path('autonomous') + \"/src/data.txt\"\n with open(file_path) as json_file:\n json_data = json.load(json_file)\n \n new_data = []\n for d in json_data:\n a = Autons(len(new_data))\n a.deserialize_json(d)\n new_data.append(a)\n\n global data\n data = new_data\n except:\n read_json()", "def parse_json():\n parsed = None\n try:\n path = sys.argv[1]\n except IndexError as idx_err:\n try:\n return json.load(sys.stdin)\n except ValueError as err:\n raise (ValueError,'Malformed JSON via stdin. Should have keys incomes, expenses. 
You can also pass a json file path as an argument')\n else:\n try:\n with open(path, 'r') as data:\n return json.load(data)\n except ValueError as val_err:\n raise(ValueError, 'Malformed JSON! Should have keys incomes, expenses')", "def read_json():\n with open('clubs.json') as json_file:\n return json.load(json_file)", "def _read_json(cls, file_name):\n\n json_file = os.path.join(cls.module_dir, file_name)\n\n with open(json_file, 'r', encoding='utf8') as f:\n return json.load(f)", "def read(fname):\n # Read string from JSON file.\n with open(fname, 'r') as fi:\n serial = fi.read()\n\n # Decode.\n decoder = json.JSONDecoder(object_hook=numpy_hook)\n data = decoder.decode(serial)\n\n return data", "def read_json(file_path):\n with open(file_path) as file:\n return json.load(file)", "def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')", "def _readin_JSON(file):\n\tdef object_decoder(obj):\n\t\t\"\"\"This function is used to properly load the JSON elements into the corresponding classes.\"\"\"\n\t\tif 'logfile' in obj:\n\t\t\treturn logfile(obj['logfile']['name'], obj['logfile']['lines'], obj['logfile']['type'], obj['logfile']['content'], obj['logfile']['sources'])\n\t\tif 'logfile_entry' in obj:\n\t\t\tif len(obj['logfile_entry']['timestamp']['datetime']) >= 20 :\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S.%f\")\n\t\t\telif obj['logfile_entry']['timestamp']['datetime'][-6:-5] != '+':\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S\")\n\t\t\telse:\n\t\t\t\tunformatted_date = obj['logfile_entry']['timestamp']['datetime']\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# once again, related to missing features in Python 3.6\n\t\t\t\tdate = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\treturn logfile_entry(obj['logfile_entry']['id'], file, obj['logfile_entry']['message'], obj['logfile_entry']['structured_data'], date,obj['logfile_entry']['hostname'],obj['logfile_entry']['source'])\n\t\treturn obj\n\n\tfp = open(file,'r')\n\tlf = json.load(fp, object_hook=object_decoder)\n\tfp.close()\n\treturn lf", "def parse_json(json_file):\n try:\n with open(json_file, \"r\") as file_reader:\n file_contents = file_reader.read()\n return json.loads(file_contents)\n except FileNotFoundError:\n raise FileNotFoundError(\"File not found.\")", "def test_load_json():\n schema = pa.schema([\n pa.field(\"foo\", pa.int32()),\n pa.field(\"bar\", pa.int64())\n ])\n\n path = \"{}/tests/fixtures/simple_json.txt\".format(os.getcwd())\n\n converted_data = client.load_json(path, schema)\n assert converted_data.to_pydict() == {'foo': [1, 10], 'bar': [2, 20]}", "def json_reader(file_path):\n\n with open(file_path) as file:\n json_dict = json.load(file)\n\n return json_dict", "def readjson(self, jsonfile):\n with open(jsonfile, 'r') as infile:\n config = json.load(infile)\n return config", "def loadJSON(jsonData):\n\n if hasattr(jsonData, 'read'):\n loadedjson = json.load(jsonData)\n elif isinstance(jsonData, str):\n if os.path.exists(jsonData):\n with open(jsonData) as jsonFile:\n loadedjson = json.load(jsonFile)\n else:\n try:\n loadedjson = json.loads(jsonData)\n except JSONDecodeError as e:\n raise ValueError(f\" {str(e)}: Got {jsonData}, either bad format of file does not exist\")\n\n elif isinstance(jsonData, dict):\n loadedjson = jsonData\n else:\n err = 
f\"workflow type: {type(jsonData)} is unknonw. Must be str, file-like or dict. \"\n raise ValueError(err)\n\n\n return loadedjson", "def read_json(filename: str):\r\n\r\n print(\"Reading json file \" + filename + \"...\")\r\n file = open(filename, \"r\")\r\n instance = json.load(file)\r\n f.close()\r\n print(\"Done\")\r\n\r\n return instance", "def read_json(self, filename):\n with open(filename) as f:\n for line in f:\n pass\n lastWeight = json.loads(line)\n\n return lastWeight", "def read_json(filepath: str):\n if not filepath:\n raise ValueError\n if not os.path.isfile(filepath):\n raise FileNotFoundError\n with open(filepath) as f:\n return json.load(f)", "def read_file(path):\n with open(path) as json_file:\n data = json.load(json_file)\n return data", "def read_json_file(self, fname):\n return {}", "def test_json_reader_data_contents(process_data):\n json_data = process_data(file_name_or_type='scooter_data.json')\n for val in json_data:\n assert(isinstance(val['id'], int))\n assert(isinstance(val['name'], str))\n assert(isinstance(val['vin_number'], str))\n assert(isinstance(val['electric_scooter'], bool))\n assert(isinstance(val['city'], str))\n assert(isinstance(val['usage'], str))\n assert(isinstance(val['cost_usd'], float))\n assert(isinstance(val['total_years_of_use'], int))", "def _localloadjson(path: str) -> JSONType:\n with open(path, encoding=\"utf-8\") as fh:\n return json.load(fh)", "def open_json(path):\n with open(path, \"r\") as json_data_file:\n data = json.load(json_data_file)\n return data", "def read(self):\r\n try:\r\n with open(self.filename, 'r') as f:\r\n self.__config = json.load(f)\r\n except (IOError, OSError) as e:\r\n # File reading error\r\n if not os.path.exists(self.filename):\r\n self.__config = {}\r\n else:\r\n raise\r\n except ValueError:\r\n # JSON decoding error\r\n raise", "def read_json(filename: Union[pathlib.Path, str]):\n if isinstance(filename, str):\n filename = pathlib.Path(filename)\n with open(filename) as fh:\n return json.loads(fh.read())", "def read_from_json(self, filename=None):\n\n try:\n if filename == None:\n\n filename = THIS_PATH+str(self.json_filename)+str(SUFFIX_REVISED)+'.json'\n #print (filename)\n with open(filename, 'r') as input_file:\n try:\n self.data_openei = json.load(input_file)\n except ValueError:\n\n print('cant parse json')\n return 1\n except Exception as e:\n\n print(('cant open file' + str(e)))\n return 2\n\n # Encode the start/end dates as integers\n for block in self.data_openei:\n block['enddate'] = datetime.strptime(block['enddate'], '%Y-%m-%dT%H:%M:%S.000Z').replace(tzinfo=pytz.timezone('UTC'))\n block['startdate'] = datetime.strptime(block['startdate'], '%Y-%m-%dT%H:%M:%S.000Z').replace(tzinfo=pytz.timezone('UTC'))\n\n return 0 # everything went well", "def read_json(file):\n\n with open(file, 'r') as fp:\n data = json.load(fp)\n return data", "def read(s):\n import warnings\n warnings.warn(\"simplejson.loads(s) should be used instead of read(s)\",\n DeprecationWarning)\n return loads(s)", "def read_json(filepath):\n\n with open(filepath, 'r', encoding='utf-8') as file_obj:\n data = json.load(file_obj)\n\n return data", "def load_from_json(self, json_fp: str):\n # TODO:\n pass", "def parse_json(json_path):\n with open(json_path, 'r') as f:\n out = json.load(f)\n return out", "def _read_jsonl(cls, input_file):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n lines = []\n for line in f:\n lines.append(json.loads(line))\n return lines", "def import_json(self) -> dict:\n with open(self.path, 
encoding=\"utf8\") as json_file:\n return json.load(json_file)", "def load_json(jsonfile):\n with open(jsonfile) as f:\n return json.load(f)", "def load_json(jsonfile):\n with open(jsonfile) as f:\n return json.load(f)", "def readJsonFile(filename):\n try:\n with open(filename, 'r') as jsonfile:\n data = json.load(jsonfile)\n return data\n except IOError:\n print(\"Error reading json file: %s - using blank list\" % filename)\n return []", "def read_json(file_path):\n return json.loads(strip_comments(read_text(file_path)))", "def test_read_json2():\n s = JsonSource()\n g = s.parse(os.path.join(RESOURCE_DIR, 'valid.json'), provided_by='Test JSON')\n nodes = {}\n edges = {}\n for rec in g:\n if rec:\n if len(rec) == 4:\n edges[(rec[0], rec[1])] = rec[3]\n else:\n nodes[rec[0]] = rec[1]\n\n assert len(nodes.keys()) == 6\n assert len(edges.keys()) == 5\n\n n = nodes['MONDO:0017148']\n assert 'id' in n and n['id'] == 'MONDO:0017148'\n assert n['name'] == 'heritable pulmonary arterial hypertension'\n assert n['category'][0] == 'biolink:Disease'\n assert 'Test JSON' in n['provided_by']\n\n e = edges[('HGNC:11603', 'MONDO:0017148')]\n assert e['subject'] == 'HGNC:11603'\n assert e['object'] == 'MONDO:0017148'\n assert e['predicate'] == 'biolink:related_to'\n assert e['relation'] == 'RO:0004013'\n assert 'Test JSON' in e['provided_by']", "def read_json(filename):\n with open(filename, 'r') as f:\n data = json.load(f)\n return data", "def example_json40(example_json_file40):\n return json.loads(example_json_file40)", "def _loadJson(self, file):\n # TODO : Is it paranoid checking?\n if os.path.isfile(file):\n try:\n with open(file, 'r') as f:\n data = json.load(f)\n return data\n except ValueError:\n msg = \"Corrupted JSON file => %s\" % file\n # logger.error(msg)\n self._exception(200, msg)\n # return -2 # code for corrupted json file\n else:\n msg = \"File cannot be found => %s\" % file\n self._exception(201, msg)" ]
[ "0.72466516", "0.71838474", "0.7122567", "0.70307904", "0.70307904", "0.7005705", "0.7005705", "0.69405353", "0.6842769", "0.6829329", "0.6786115", "0.6777155", "0.67250484", "0.6718409", "0.6713313", "0.6706307", "0.6697732", "0.6672447", "0.6669862", "0.6629288", "0.6623848", "0.6614688", "0.65974957", "0.65810955", "0.6557045", "0.6548853", "0.6547689", "0.65163654", "0.6510202", "0.64768773", "0.64734966", "0.64616", "0.6455399", "0.64543426", "0.6431759", "0.64307827", "0.64241207", "0.64216155", "0.6410435", "0.6403907", "0.63951594", "0.6394514", "0.6393132", "0.6389358", "0.63873744", "0.63580036", "0.63551176", "0.6346061", "0.6345516", "0.63395435", "0.6333612", "0.63323843", "0.63299036", "0.6301884", "0.6301884", "0.63010037", "0.6285055", "0.6282574", "0.627401", "0.62691283", "0.62621105", "0.62603414", "0.6249144", "0.6247082", "0.6245483", "0.6244027", "0.62399054", "0.62279975", "0.62204957", "0.62141466", "0.6213846", "0.62012005", "0.61852664", "0.6180184", "0.61779517", "0.61731285", "0.61720836", "0.6171223", "0.6159754", "0.6155287", "0.6150645", "0.6140657", "0.61399835", "0.6136992", "0.61302316", "0.61257863", "0.6119226", "0.61102194", "0.61062354", "0.6103962", "0.6102175", "0.6097066", "0.6093216", "0.6087577", "0.6087577", "0.60783416", "0.60683465", "0.6067502", "0.60600334", "0.6058522", "0.6056631" ]
0.0
-1
Convenience function to normalize the `shape` argument.
def normalize_shape(shape: Union[int, Tuple[int, ...], None]) -> Tuple[int, ...]:
    if shape is None:
        raise TypeError("shape is None")

    # handle 1D convenience form
    if isinstance(shape, numbers.Integral):
        shape = (int(shape),)

    # normalize
    shape = cast(Tuple[int, ...], shape)
    shape = tuple(int(s) for s in shape)
    return shape
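A minimal usage sketch of `normalize_shape` as defined above, assuming the `numbers` and `typing` imports it relies on are in scope; the calls shown are illustrative only:

import numbers
from typing import Tuple, Union, cast

# an int is the 1D convenience form and is wrapped into a 1-tuple
assert normalize_shape(100) == (100,)
# sequence elements are coerced to int
assert normalize_shape((10.0, 20)) == (10, 20)
# None is rejected explicitly
try:
    normalize_shape(None)
except TypeError:
    pass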
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(shape):\n s = shape\n matrix = Shape.get_matrix(s.get_vector())\n norm_x = math.sqrt(sum(matrix[:, 0] ** 2))\n norm_y = math.sqrt(sum(matrix[:, 1] ** 2))\n for pt in s.pts:\n pt.x /= norm_x\n pt.y /= norm_y\n return s", "def _normalize_shape(shape):\n\n if isinstance(shape, (np.integer, int)):\n if shape < 1:\n raise ValueError(\"shape value must be greater than 0: %d\"\n % shape)\n shape = (shape,) # N is a shorthand for (N,)\n try:\n shape = tuple(shape)\n except TypeError:\n raise TypeError(\"shape must be an integer or sequence: %r\"\n % (shape,))\n\n # XXX Get from HDF5 library if possible.\n # HDF5 does not support ranks greater than 32\n if len(shape) > 32:\n raise ValueError(\n f\"shapes with rank > 32 are not supported: {shape!r}\")\n\n return tuple(SizeType(s) for s in shape)", "def unchanged_shape(input_shape):\n return input_shape", "def roi_normalise(roi, shape):\n\n def fill_if_none(x, val_if_none):\n return val_if_none if x is None else x\n\n def norm_slice(s, n):\n start = fill_if_none(s.start, 0)\n stop = fill_if_none(s.stop, n)\n start, stop = [x if x >= 0 else n+x for x in (start, stop)]\n return slice(start, stop, s.step)\n\n if not isinstance(shape, collections.abc.Sequence):\n shape = (shape,)\n\n if isinstance(roi, slice):\n return norm_slice(roi, shape[0])\n\n return tuple([norm_slice(s, n) for s, n in zip(roi, shape)])", "def _normalize_coordinates(\n target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False\n) -> np.ndarray:\n old_height, old_width = original_size\n\n scale = target_size * 1.0 / max(old_height, old_width)\n new_height, new_width = old_height * scale, old_width * scale\n new_width = int(new_width + 0.5)\n new_height = int(new_height + 0.5)\n\n coords = deepcopy(coords).astype(float)\n\n if is_bounding_box:\n coords = coords.reshape(-1, 2, 2)\n\n coords[..., 0] = coords[..., 0] * (new_width / old_width)\n coords[..., 1] = coords[..., 1] * (new_height / old_height)\n\n if is_bounding_box:\n coords = coords.reshape(-1, 4)\n\n return coords", "def normalize(inp):\n\n out = inp / np.linalg.norm(inp, axis=1, keepdims=True)\n\n return out", "def __normalize(input, type, a, b):\n return cv2.normalize(input, None, a, b, type)", "def processed_shape(self, shape):\n return shape", "def normalize_chunks(chunks: Any, shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:\n\n # N.B., expect shape already normalized\n\n # handle auto-chunking\n if chunks is None or chunks is True:\n return guess_chunks(shape, typesize)\n\n # handle no chunking\n if chunks is False:\n return shape\n\n # handle 1D convenience form\n if isinstance(chunks, numbers.Integral):\n chunks = tuple(int(chunks) for _ in shape)\n\n # handle bad dimensionality\n if len(chunks) > len(shape):\n raise ValueError(\"too many dimensions in chunks\")\n\n # handle underspecified chunks\n if len(chunks) < len(shape):\n # assume chunks across remaining dimensions\n chunks += shape[len(chunks) :]\n\n # handle None or -1 in chunks\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks))\n\n chunks = tuple(int(c) for c in chunks)\n return chunks", "def changeInputShape(self,shape):\n self.input_shape = shape", "def set_shape(self, shape):\n self._shape = self._shape.merge_with(shape)", "def normalize(a, axis=None):\n a_sum = a.sum(axis)\n if axis and a.ndim > 1:\n a_sum[a_sum == 0] = 1\n shape = list(a.shape)\n shape[axis] = 1\n a_sum.shape = shape\n\n return a / a_sum", "def 
_normalized_window(shape, num_samples):\n if shape == \"rectangular\":\n return 1.0, 1.0\n t_max = _default_window_t_max.get(shape, None)\n if t_max is None:\n raise ValueError(f\"Window function {shape} is not supported.\")\n tlist = np.linspace(0, t_max, num_samples)\n if shape in _analytical_window:\n coeff = _analytical_window[shape](tlist)\n else:\n coeff = signal.windows.get_window(shape, num_samples)\n return coeff, tlist", "def _normalize(weights, axis, log=True):\n if log:\n normalizer = tf.reduce_logsumexp(weights, axis=axis, keepdims=True)\n return weights - normalizer\n normalizer = tf.reduce_sum(weights, axis=axis)\n return weights / normalizer", "def _normalize(self, inp):\n \n return inp/inp.sum()", "def preprocess(img, out_shape=None):\n if out_shape is not None:\n img = resize(img, out_shape, mode='constant')\n\n # Normalize the image\n mean = img.mean()\n std = img.std()\n return (img - mean) / std", "def convert_shape_to_units(self, shape):\n unit_factor = [GDS[\"unit\"][0]] * 2\n ll=shape[0].scale(unit_factor)\n ur=shape[1].scale(unit_factor)\n return [ll,ur]", "def init_he_scale(shape, slope=1.0):\n fan_in = np.prod(shape[:-1])\n return np.sqrt(2. / ((1. + slope**2) * fan_in))", "def _normalize(X: np.ndarray) -> np.ndarray:\n # return X * np.sqrt(1 / np.sum(X ** 2, axis=1))[:, None]\n return X * np.sqrt(X.shape[1] / np.sum(X ** 2, axis=1))[:, None]", "def compute_output_shape(self, input_shape):\r\n return input_shape", "def layer_norm(input, normalized_shape, weight, bias, eps=1e-5):\n return FunctionLib.apply(\n 'LayerNorm', input.device, [input, weight, bias],\n axis=input.ndimension() - len(normalized_shape), epsilon=eps)", "def _partial_flatten_and_normalize(x):\n x = np.reshape(x, (x.shape[0], -1))\n return (x - np.mean(x)) / np.std(x)", "def reshape(self, *shape):\n return F.Reshape.apply(self, shape)", "def _normalize_tensor(input_tensor):\n\n rms_tensor = K.sqrt(K.mean(K.square(input_tensor)))\n return input_tensor / (rms_tensor + K.epsilon())", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def partial_flatten_and_normalize(x):\n x = np.reshape(x, (x.shape[0], -1))\n return (x - np.mean(x)) / np.std(x)", "def normalize(a):\n a = np.array(a)\n return a / np.linalg.norm(a)", "def _maybe_validate_shape_override(self, override_shape, base_is_scalar,\n validate_args, name):\n if override_shape is None:\n override_shape = []\n\n override_shape = ops.convert_to_tensor(override_shape, dtype=dtypes.int32,\n name=name)\n\n if not override_shape.dtype.is_integer:\n raise TypeError(\"shape override must be an integer\")\n\n override_is_scalar = _is_scalar_from_shape(override_shape)\n if tensor_util.constant_value(override_is_scalar):\n return self._empty\n\n dynamic_assertions = []\n\n if override_shape.get_shape().ndims is not None:\n if override_shape.get_shape().ndims != 1:\n raise ValueError(\"shape override must be a vector\")\n elif validate_args:\n dynamic_assertions += [check_ops.assert_rank(\n override_shape, 1,\n message=\"shape override must be a vector\")]\n\n if tensor_util.constant_value(override_shape) is not None:\n if any(s <= 0 for s in tensor_util.constant_value(override_shape)):\n raise ValueError(\"shape override must have positive elements\")\n elif validate_args:\n dynamic_assertions += [check_ops.assert_positive(\n override_shape,\n message=\"shape override must have positive elements\")]\n\n is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),\n _logical_not(override_is_scalar))\n if 
tensor_util.constant_value(is_both_nonscalar) is not None:\n if tensor_util.constant_value(is_both_nonscalar):\n raise ValueError(\"base distribution not scalar\")\n elif validate_args:\n dynamic_assertions += [check_ops.assert_equal(\n is_both_nonscalar, False,\n message=\"base distribution not scalar\")]\n\n if not dynamic_assertions:\n return override_shape\n return control_flow_ops.with_dependencies(\n dynamic_assertions, override_shape)", "def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ...", "def normalize_weights(w, dims=(0,), bias=1e-5):\n with tf.name_scope('normalization'):\n return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))", "def normalize(w: torch.Tensor):\n\n if w.dim() > 1:\n return _matrix(w)\n\n return _vector(w)", "def normalize(tensor: np.ndarray):\n if len(tensor.shape) < 4:\n tensor = np.expand_dims(tensor, axis=2)\n mean = np.array([tensor[..., chn, :].mean() for chn in range(tensor.shape[2])])\n std = np.array([tensor[..., chn, :].std() for chn in range(tensor.shape[2])])\n return (tensor - mean[:, np.newaxis]) / std[:, np.newaxis]", "def orthogonal(shape, dtype=tf.float32, partition_info=None):\n # taken from https://github.com/cooijmanstim/recurrent-batch-normalization\n # taken from https://gist.github.com/kastnerkyle/f7464d98fe8ca14f2a1a\n flat_shape = (shape[0], np.prod(shape[1:]))\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v # pick the one with the correct shape\n q = q.reshape(shape)\n return tf.constant(q[:shape[0], :shape[1]], dtype)", "def scale_shape(shape, x_amp, y_amp, x_offset, y_offset):\n x_list =[]\n y_list =[]\n new_shape = []\n # Split the list into separate x and y lists. 
\n for i in range(len(shape)/2):\n x_list.append(shape[2*i])\n y_list.append(shape[2*i + 1])\n\n # Amplify, add offsets and re-interleave the x and y components.\n for j in range(len(x_list)):\n x_list[j] = ( x_list[j] * x_amp ) + x_offset \n new_shape.append( x_list[j] )\n y_list[j] = ( y_list[j] * y_amp ) + y_offset \n new_shape.append( y_list[j] ) \n return new_shape", "def _normalize(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n if \"norm\" in ds:\n norm = ds.norm\n else:\n norm = ds.data.mean(dim=dim)\n norm.attrs[\"_group_apply_reshape\"] = True\n\n return xr.Dataset(\n dict(data=apply_correction(ds.data, invert(norm, kind), kind), norm=norm)\n )", "def normalize(nparray, order=2, axis=0):\n norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)\n return nparray / (norm + np.finfo(np.float32).eps)", "def reshape(x, shape):\n return Reshape(shape)(x)", "def _ndims_from_shape(shape):\n if shape.get_shape().ndims not in (None, 1):\n raise ValueError(\"input is not a valid shape: not 1D\")\n if not shape.dtype.is_integer:\n raise TypeError(\"input is not a valid shape: wrong dtype\")\n if shape.get_shape().is_fully_defined():\n return constant_op.constant(shape.get_shape().as_list()[0])\n return array_ops.shape(shape)[0]", "def normalised(a: np.ndarray, order: int = None, axis: int = -1) -> np.ndarray:\n norm = np.atleast_1d(np.linalg.norm(a, order, axis))\n return a / np.expand_dims(norm, axis)", "def normalize(input_tensor, output_tensor):\n image_dims = utils.get_img_shape(input_tensor)[1:]\n return output_tensor / np.prod(image_dims)", "async def infer_shape_reshape(track, v, shape):\n shp = await shape['value']\n if shp == ANYTHING:\n shp_t = await shape['type']\n shp = (ANYTHING,) * len(shp_t.elements)\n v_shp = await v['shape']\n if (all(s is not ANYTHING for s in shp) and\n all(s is not ANYTHING for s in v_shp) and\n prod(shp) != prod(v_shp)):\n raise MyiaShapeError(\"Cannot change the total number of elements \"\n \"in reshape\")\n return shp", "def normalize(v):\n\tdim = v.shape \n\tfor i in range(0, dim[0]-1):\n\t\tv[i,:,:] = (v[i,:,:].T/np.sum(v[i,:,:],1)).T\n\n\treturn v", "def Shape(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Shape(self, *args)", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def reshape(x, shape):\n return float(x) if shape is None else jnp.reshape(x, shape)", "def normalize(v):\n return np.array(v) / np.linalg.norm(v)", "def contract_to_shape(data, shape, dtype=None):\n if dtype is None:\n dtype = data.dtype\n if shape==data.shape:\n return data.astype(dtype)\n slices = []\n for s1, s2 in zip (data.shape, shape):\n slices.append(slice((s1-s2)//2, (s1+s2)//2))\n return data[tuple(slices)].astype(dtype)", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def _fit_array_to_image(base_shape, array: np.ndarray) -> np.ndarray:\n shape = list(array.shape)\n for i, el in enumerate(base_shape):\n if el == 1 and el != shape[i]:\n shape.insert(i, 1)\n elif el != shape[i]:\n raise ValueError(f\"Wrong array shape {shape} for {base_shape}\")\n if len(shape) != len(base_shape):\n raise ValueError(f\"Wrong array shape {shape} for {base_shape}\")\n return np.reshape(array, shape)", "def norm_boxes_graph(boxes, shape):\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - 
shift, scale)", "def sample_n_shape_converter(size):\n if size is None:\n return size\n if size == ():\n size = None\n else:\n if isinstance(size, int):\n size = (size,)\n size = (-2,) + size\n return size", "def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8", "def set_model_input_shape_attr(model, dataset=None, input_shape=None):\n if not hasattr(model, 'input_shape'):\n model.input_shape = _validate_input_shape(dataset, input_shape)", "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.compat.v1.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "def standardize_single_array(x):\n if x is None:\n return None\n if tensor_util.is_tensor(x):\n x_shape_ndims = array_ops.rank(x)\n else:\n x_shape_ndims = len(x.shape)\n\n if (x_shape_ndims == 1 and (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tensor(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x", "def get_param_scale_shape(shape_x, shape_scale):\n\n length_x = len(shape_x)\n length_scale = len(shape_scale)\n\n if length_scale == 1 and shape_scale[0] == 1:\n shape = [1] * length_x\n else:\n shape = list(shape_scale)\n\n return shape", "def normalize(v):\n return v / np.linalg.norm(v)", "def reshape(self, *shape):\n return Signal(self._initial_value.reshape(*shape),\n name=\"%s.reshape(%s)\" % (self.name, shape),\n base=self.base)", "def norm_boxes(boxes, shape):\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.divide((boxes - shift), scale).astype(np.float32)", "def normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std", "def he_normal(weight_shape):\n if len(weight_shape) == 4:\n fW, fH, fC, _ = weight_shape\n return np.random.normal(0, np.sqrt(2 / (fW*fH*fC)), weight_shape)\n num_input, _ = weight_shape\n return np.random.normal(0, np.sqrt(2 / num_input), weight_shape)", "def normalize(arr, eps):\n\n norm = cuda.reduce('T x', 'T out',\n 'x * x', 'a + b', 'out = sqrt(a)', 0,\n 'norm_sn')(arr)\n cuda.elementwise('T norm, T eps',\n 'T x',\n 'x /= (norm + eps)',\n 'div_sn')(norm, eps, arr)\n return norm", "def _infer_shape(schema):\n for feature in schema.feature:\n # Currently we infer shape only for features with valency 1.\n if (feature.presence.min_fraction == 1 and\n 
feature.value_count.min == feature.value_count.max == 1):\n feature.shape.dim.add().size = 1", "def local_to_normalized(npboxes: np.ndarray, window: Box):\n height, width = window.size\n return npboxes / np.array([[height, width, height, width]])", "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "def _optimizeshape(shape):\n shape.sort()\n if ORDER == 'C':\n shape[:] = shape[::-1]", "def mean(X, shape):\n return np.real(X[0, 0]) / (shape[0] * shape[1])", "def normalize(x):\n\n x_norm = np.linalg.norm(x, axis=1, keepdims=True)\n print(x_norm)\n x = x / x_norm\n ### END\n\n return x", "def get_shape(shape):\n try:\n return (shape[0], shape[1]) + shape[2:]\n except IndexError:\n return (shape[0], 0) + shape[2:]\n except TypeError:\n return int(shape), 0", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalize(w):\n s = sum(w)\n for i in range(len(w)):\n w[i] /= s\n return w", "def layer_normalize_(self, ref_point: 'ModelParameters', order=2):\n # in-place normalize each parameter\n for layer_idx, parameter in enumerate(self.parameters, 0):\n parameter *= (ref_point.layer_norm(layer_idx, order) / self.layer_norm(layer_idx, order))", "def normalize_feature(feature):\n # Compute mean and standard deviation, and return (x-mu)/std\n mean = np.mean(feature)\n std = np.std(feature)\n return np.divide(np.subtract(feature, mean), std)", "def normalize(x):\r\n return x/norm(x)", "def normal_init(self, shape):\n return np.random.normal(size=(shape[0],shape[1]))*0.01", "def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))", "def uniformize(self):\n\n self.len = len(self.x)\n\n if self.len > 1:\n # comput length of the shape:\n shape_length, scale = self.euclidian_length()\n\n # find new points:\n new_shape = Stroke()\n new_shape.x = []\n new_shape.y = []\n step = shape_length / float(self.len)\n biggest_smoller_point = 0\n new_shape.append(self.x[0], self.y[0])\n for i in 1 + np.array(range(len(self.x) - 1)):\n try:\n while i * step > scale[biggest_smoller_point]:\n biggest_smoller_point += 1\n\n biggest_smoller_point -= 1\n x0 = self.x[biggest_smoller_point]\n y0 = self.y[biggest_smoller_point]\n x1 = self.x[biggest_smoller_point + 1]\n y1 = self.y[biggest_smoller_point + 1]\n diff = float(i * step - scale[biggest_smoller_point])\n dist = float(scale[biggest_smoller_point + 1] - scale[biggest_smoller_point])\n new_x = x0 + diff * (x1 - x0) / dist\n new_y = y0 + diff * (y1 - y0) / dist\n new_shape.append(new_x, new_y)\n\n except IndexError:\n print i * step\n print biggest_smoller_point\n print scale\n # new_shape.append(self.x[-1], self.y[-1])\n\n\n self.x = new_shape.x\n self.y = new_shape.y\n self.len = new_shape.len", "def 
compute_output_shape(self,input_shape):\n return (input_shape[0][0])", "def _normalize(x):\n tol = 1e-10\n dims = x.shape\n\n x = x.flatten()\n inverse = (np.sum(x**2) + tol) ** -.5\n x = x * inverse\n x = np.reshape(x, dims)\n\n return x", "def normalize(arr: np.ndarray) -> np.ndarray:\n if max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))", "def _check_shape(shape):\n if type(shape) == int:\n shape = (shape, shape)\n check_odd(shape, 'psf shape')\n return shape", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = nest.flatten(target_shape)", "def orthogonal_initializer(shape, dtype=tf.float32, *args, **kwargs):\n del args\n del kwargs\n flat_shape = (shape[0], np.prod(shape[1:]))\n w = np.random.randn(*flat_shape)\n u, _, v = np.linalg.svd(w, full_matrices=False)\n w = u if u.shape == flat_shape else v\n return tf.constant(w.reshape(shape), dtype=dtype)", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def _normalize(self, x, axis, eps=1e-5):\n return x / (\n tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True)) + 1e-5)", "def random_orthonormal_initializer(shape, dtype=tf.float32,\n partition_info=None): # pylint: disable=unused-argument\n if len(shape) != 2 or shape[0] != shape[1]:\n raise ValueError(\"Expecting square shape, got %s\" % shape)\n _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)\n return u", "def _normalize(a: np.ndarray, u: float=0, s: float=1) -> np.ndarray:\n a_norm = (a - np.mean(a)) / (np.std(a) + STABILITY)\n a_rescaled = a_norm * s + u\n\n return a_rescaled", "def get_mean_shape(shapes):\n mean_s = shapes[0]\n for shape in shapes[1:]:\n mean_s = mean_s + shape\n return mean_s / len(shapes)", "def normalize(self, factor):", "def _keras_update_shape(self, prep):\n\n # Run preprocessing on the training data\n X_transform = prep.fit_transform(self.X_train)\n\n # If the input shape has not been specified, it is simply the number of features in X_transform\n if 'input_shape' not in self.model.first_layer_kwargs:\n self.model.first_layer_kwargs['input_shape'] = tuple([X_transform.shape[1]])\n # Else update the input shape based on the number of features after preprocessing\n else:\n # Transform to a list to make the input_shape mutable\n self.model.first_layer_kwargs['input_shape'] = list(self.model.first_layer_kwargs['input_shape'])\n # Update the number of features based on X_transform\n if self.model.lags:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//(self.model.lags + (1 if self.model.current_sample_as_input else 0))\n else:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//np.prod(self.model.first_layer_kwargs['input_shape'][:-1])\n # Transform back to a tuple as required by Keras\n self.model.first_layer_kwargs['input_shape'] = tuple(self.model.first_layer_kwargs['input_shape'])\n \n # Ensure the Architecture has been updated\n self.model.architecture.iloc[0, 2]['input_shape'] = self.model.first_layer_kwargs['input_shape']\n \n # 2D, 3D and 4D data is valid. \n # e.g. 
The input_shape can be a tuple of (subsequences, timesteps, features), with subsequences and timesteps as optional.\n # A 4D shape may be valid for e.g. a ConvLSTM with (timesteps, rows, columns, features) \n if len(self.model.first_layer_kwargs['input_shape']) > 5:\n err = \"Unsupported input_shape: {}\".format(self.model.first_layer_kwargs['input_shape'])\n raise Exception(err)", "def normalize(vec):\n return vec / length(vec)", "def standardize_single_array(x, expected_shape=None):\n if x is None:\n return None\n\n if is_composite_or_composite_value(x):\n return x\n\n if isinstance(x, int):\n raise ValueError(\n 'Expected an array data type but received an integer: {}'.format(x))\n\n if (x.shape is not None and len(x.shape) == 1 and\n (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tf_type(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x", "def reshape(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = Reshape(shape).apply((x,))\n return y", "def standard_normal(weight_shape):\n return np.random.normal(size=weight_shape)", "def normalized(v):\n norm = np.linalg.norm(v)\n if norm:\n return np.array(v) / norm\n else:\n return v", "def preserve_shape(func):\n @wraps(func)\n def wrapped_function(img, *args, **kwargs):\n shape = img.shape\n result = func(img, *args, **kwargs)\n result = result.reshape(shape)\n return result\n\n return wrapped_function", "def broadcast_to(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = BroadcastTo(shape).apply((x,))\n return y", "def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img" ]
[ "0.7858332", "0.7624819", "0.69473106", "0.6248372", "0.61092657", "0.60909945", "0.6038357", "0.60316885", "0.6010704", "0.587052", "0.5844497", "0.5780181", "0.57371706", "0.5733882", "0.5694286", "0.56934756", "0.5677857", "0.5620248", "0.56186056", "0.55923927", "0.5589575", "0.5565389", "0.55552363", "0.5545739", "0.5527111", "0.55265814", "0.5520939", "0.5500622", "0.54936635", "0.54822534", "0.54813814", "0.5476428", "0.5474385", "0.5454791", "0.544065", "0.54327166", "0.5432308", "0.54238784", "0.5423722", "0.54223686", "0.5422044", "0.5420738", "0.5418667", "0.54099935", "0.53986555", "0.53946584", "0.5374019", "0.53715605", "0.53670466", "0.535801", "0.53535974", "0.5347736", "0.5347473", "0.53463453", "0.53373337", "0.53205335", "0.53188634", "0.5316742", "0.53128886", "0.53043056", "0.5293903", "0.5293074", "0.52878386", "0.5274554", "0.52744806", "0.52712625", "0.52669394", "0.52666837", "0.5263022", "0.52594835", "0.52594835", "0.5254769", "0.52540475", "0.52475405", "0.5247445", "0.5246764", "0.52418184", "0.5238698", "0.5238339", "0.5236744", "0.52355134", "0.52287745", "0.5220859", "0.5215255", "0.5210869", "0.52094334", "0.5204866", "0.52038914", "0.51942843", "0.51832503", "0.51817787", "0.51745224", "0.5172025", "0.51701355", "0.51644504", "0.51641035", "0.5163097", "0.51615953", "0.51579064", "0.5157303" ]
0.73496735
2
Guess an appropriate chunk layout for an array, given its shape and the size of each element in bytes. Will allocate chunks only as large as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of each axis, slightly favoring bigger values for the last index. Undocumented and subject to change without warning.
def guess_chunks(shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:
    ndims = len(shape)
    # require chunks to have non-zero length for all dimensions
    chunks = np.maximum(np.array(shape, dtype="=f8"), 1)

    # Determine the optimal chunk size in bytes using a PyTables expression.
    # This is kept as a float.
    dset_size = np.prod(chunks) * typesize
    target_size = CHUNK_BASE * (2 ** np.log10(dset_size / (1024.0 * 1024)))

    if target_size > CHUNK_MAX:
        target_size = CHUNK_MAX
    elif target_size < CHUNK_MIN:
        target_size = CHUNK_MIN

    idx = 0
    while True:
        # Repeatedly loop over the axes, dividing them by 2.  Stop when:
        # 1a. We're smaller than the target chunk size, OR
        # 1b. We're within 50% of the target chunk size, AND
        #  2. The chunk is smaller than the maximum chunk size
        chunk_bytes = np.prod(chunks) * typesize

        if (
            chunk_bytes < target_size or abs(chunk_bytes - target_size) / target_size < 0.5
        ) and chunk_bytes < CHUNK_MAX:
            break

        if np.prod(chunks) == 1:
            break  # Element size larger than CHUNK_MAX

        chunks[idx % ndims] = math.ceil(chunks[idx % ndims] / 2.0)
        idx += 1

    return tuple(int(x) for x in chunks)
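A short, illustrative call of `guess_chunks` as defined above. The CHUNK_BASE / CHUNK_MIN / CHUNK_MAX values below are assumed placeholders for the module-level constants the function expects, not necessarily the library's real settings:

import math
import numpy as np

# assumed placeholder limits in bytes; the real module defines its own constants
CHUNK_MIN = 128 * 1024
CHUNK_MAX = 64 * 1024 * 1024
CHUNK_BASE = 256 * 1024

# For an 8-byte dtype the axes are repeatedly halved (favoring earlier axes
# first in the round-robin) until one chunk's byte size is close to the
# computed target; the result is a per-axis chunk layout.
print(guess_chunks((100_000, 1_000), 8))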
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations trying to split chunk\")", "def normalize_chunks(chunks: Any, shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:\n\n # N.B., expect shape already normalized\n\n # handle auto-chunking\n if chunks is None or chunks is True:\n return guess_chunks(shape, typesize)\n\n # handle no chunking\n if chunks is False:\n return shape\n\n # handle 1D convenience form\n if isinstance(chunks, numbers.Integral):\n chunks = tuple(int(chunks) for _ in shape)\n\n # handle bad dimensionality\n if len(chunks) > len(shape):\n raise ValueError(\"too many dimensions in chunks\")\n\n # handle underspecified chunks\n if len(chunks) < len(shape):\n # assume chunks across remaining dimensions\n chunks += shape[len(chunks) :]\n\n # handle None or -1 in chunks\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks))\n\n chunks = tuple(int(c) for c in chunks)\n return chunks", "def yield_chunks(arr, chunk_size):\r\n larr = len(arr)\r\n if larr < chunk_size:\r\n raise ValueError(\"The array length (%d) must be larger than the chunk size (%d)\" % (len(arr), chunk_size))\r\n\r\n cursor = 0\r\n while cursor < larr:\r\n next_cursor = min(cursor + chunk_size, larr)\r\n yield arr[cursor:next_cursor]\r\n cursor = next_cursor", "def test_chunk_size(self):\n for chunk_size, expected_n_chunks in [(1, 100), (3, 34), (200, 1), (None, 1)]:\n with self.subTest(chunk_size=chunk_size):\n iterable_of_args, iterable_len, chunk_size_, n_splits = apply_numpy_chunking(\n self.test_data_numpy, chunk_size=chunk_size, n_splits=1\n )\n\n # Materialize generator and test contents. 
The chunks should be of size chunk_size (expect for the last\n # chunk which can be smaller)\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), expected_n_chunks)\n chunk_size = chunk_size or 100\n for chunk_idx, chunk in enumerate(iterable_of_args):\n self.assertIsInstance(chunk[0], np.ndarray)\n np.testing.assert_array_equal(chunk[0], self.test_data_numpy[chunk_idx * chunk_size:\n (chunk_idx + 1) * chunk_size])\n\n # Test other output\n self.assertEqual(iterable_len, expected_n_chunks)\n self.assertEqual(chunk_size_, 1)\n self.assertIsNone(n_splits)", "def perform_chunking(self, data_size, chunk_size):\r\n\r\n chunks, i = [], 0\r\n while True:\r\n chunks.append((i * (chunk_size - self.overlap / 2), i * (chunk_size - self.overlap / 2) + chunk_size))\r\n i += 1\r\n if chunks[-1][1] > data_size:\r\n break\r\n\r\n n_count = len(chunks)\r\n chunks[-1] = tuple(x - (n_count * chunk_size - data_size - (n_count - 1) * self.overlap / 2) for x in chunks[-1])\r\n chunks = [(int(x), int(y)) for x, y in chunks]\r\n return chunks", "def get_chunks(size):\n chunk_start = 0\n chunk_size = 0x20000\n\n while chunk_start + chunk_size < size:\n yield (chunk_start, chunk_size)\n chunk_start += chunk_size\n if chunk_size < 0x100000:\n chunk_size += 0x20000\n\n if chunk_start < size:\n yield (chunk_start, size - chunk_start)", "def get_chunks(num_items, num_steps):\n chunk_sizes = np.zeros(num_steps, dtype=int)\n chunk_sizes[:] = num_items // num_steps\n chunk_sizes[:num_items % num_steps] += 1\n\n chunk_offsets = np.roll(np.cumsum(chunk_sizes), 1)\n chunk_offsets[0] = 0\n return chunk_sizes, chunk_offsets", "def calc_chunk_sizes(data_size, min_chunk_size=CHUNK_SIZE_MIN - 4,\n tgt_chunk_size=CHUNK_SIZE_TGT - 4,\n max_chunk_size=CHUNK_SIZE_MAX - 4):\n logger.debug('data size is %s' % data_size)\n logger.debug('calculating chunk sizes')\n if data_size < min_chunk_size:\n return [min_chunk_size]\n elif data_size <= tgt_chunk_size:\n return [data_size]\n # so now, we're only dealing with sizes > target size\n chunk_count = 1\n direction = 'up'\n changed_direction = False\n while 1:\n chunk_size = data_size * 1.0 / chunk_count\n margin = abs(tgt_chunk_size - chunk_size)\n if chunk_size == tgt_chunk_size:\n return [tgt_chunk_size,] * chunk_count\n elif changed_direction:\n if margin >= last_margin:\n chunk_count = last_chunk_count\n break\n elif chunk_size >= max_chunk_size:\n if direction == 'down':\n changed_direction = True\n direction = 'up'\n last_margin = margin\n last_chunk_count = chunk_count\n elif chunk_size <= min_chunk_size:\n if direction == 'up':\n changed_direction = True\n direction = 'down'\n last_margin = margin\n last_chunk_count = chunk_count\n else:\n last_margin = margin\n last_chunk_count = chunk_count\n if tgt_chunk_size < chunk_size < max_chunk_size and \\\n direction == 'down':\n # a little bigger than we'd like (need more pieces)\n direction = 'up'\n changed_direction = True\n elif min_chunk_size <= chunk_size < tgt_chunk_size and \\\n direction == 'up':\n # a bit too small (try fewer pieces)\n direction = 'down'\n changed_direction = True\n if direction == 'up':\n chunk_count += 1\n elif direction == 'down':\n chunk_count -= 1\n\n # Create the list of chunk sizes. 
Spread out the rounding difference among\n # the chunks so that they're close to the same size.\n chunk_size_raw = data_size * 1.0 / chunk_count\n logger.debug('unrounded chunk size is %s ' % chunk_size_raw)\n chunk_size = int(round(chunk_size_raw))\n logger.debug('rounded chunk size is %s' % chunk_size)\n total_round_diff = data_size - chunk_size * chunk_count\n msg = '%s rounded bytes to distribute among %s chunks'\n logger.debug(msg % (total_round_diff, chunk_count))\n # Round it to a full byte. If it's negative, round down because rounding\n # up will take away too many chunks.\n if total_round_diff < 0:\n total_round_diff = int(math.floor(total_round_diff))\n else:\n total_round_diff = int(math.ceil(total_round_diff))\n round_diff_sum = total_round_diff\n chunk_sizes = []\n for i in range(chunk_count):\n if round_diff_sum > 0:\n this_chunk_size = chunk_size + 1\n round_diff_sum -= 1\n elif round_diff_sum < 0:\n this_chunk_size = chunk_size - 1\n round_diff_sum += 1\n else:\n this_chunk_size = chunk_size\n chunk_sizes.append(this_chunk_size)\n assert data_size == sum(chunk_sizes), '%s, %s' (data_size,\n sum(chunk_sizes))\n return chunk_sizes", "def blockify_chunks(chunks):\n acc = []\n size = 0\n for chunk, chunk_size in chunks:\n assert len(chunk) == CHUNK_SIZE\n assert len(acc) <= BLOCK_SIZE\n if len(acc) == BLOCK_SIZE:\n # Only the last chunk may be short.\n assert size == CHUNK_SIZE * BLOCK_SIZE\n yield acc, size\n acc = []\n size = 0\n acc.append(chunk)\n size += chunk_size\n assert acc\n yield acc, size", "def iter_slices(shape, chunk_size):\n assert len(shape) == len(chunk_size)\n num_grid_chunks = [int(ceil(s / float(c))) for s, c in zip(shape, chunk_size)]\n for grid_index in numpy.ndindex(*num_grid_chunks):\n yield tuple(\n slice(min(d * c, stop), min((d + 1) * c, stop)) for d, c, stop in zip(grid_index, chunk_size, shape))", "def iterate_array_in_chunks(arr, size: int):\n for i in range(0, len(arr), size):\n yield arr[i:i+size]", "def dur_chunk_sizes(n, ary):\n ret = np.ones((ary,), dtype=np.int32) * (n // ary)\n ret[: n % ary] = n // ary + 1\n assert ret.sum() == n\n return ret", "def split_array(array: np.ndarray, parts: int):\n\n if parts == -1:\n parts = array.size\n shape = array.shape\n possible_chunk_sizes = []\n # Generate all possible chunk sizes for the given array shape\n for chunk_size in product(*[range(1, shape[i] + 1) for i in range(len(shape))]):\n # Check if the number of chunks generated by the current chunk size is equal to the desired number of parts\n if np.prod(\n [shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape))]) == parts:\n possible_chunk_sizes.append(chunk_size)\n # Sort the possible chunk sizes in ascending order of the sum of the squares of their dimensions\n possible_chunk_sizes.sort(key=lambda x: np.sum(np.array(x) ** 2)) # type: ignore\n if not possible_chunk_sizes:\n logging.warning(\"Could not divide the domain in %d parts. 
Trying with parts=%d.\", parts, parts - 1)\n return split_array(array=array, parts=parts - 1)\n selected_chunk_size = possible_chunk_sizes[0]\n\n chunks = []\n # Get the number of chunks for the first possible chunk size\n num_chunks = [shape[i] // selected_chunk_size[i] + int(shape[i] % selected_chunk_size[i] != 0) for i in\n range(len(shape))]\n indexes = [range(num_chunks[i]) for i in range(len(shape))]\n # Iterate over the chunks and append the corresponding slice of the array to the chunks list\n for indx in product(*indexes):\n current_slice = tuple(\n slice(selected_chunk_size[i] * indx[i], min(selected_chunk_size[i] * (indx[i] + 1), shape[i])) for i in\n range(len(shape)))\n chunks.append(array[current_slice])\n return chunks", "def look_for_biggest_structure(game, chunk, imgs, hmap, nmax, type_):\n for n in range(nmax,0,-1):\n i = 0\n m = parameters.MAX_VILLAGE_WIDTH * n / parameters.MAX_VILLAGE_SIZE\n while i < parameters.VILLAGE_TRY:\n chunkpos = np.random.randint(0,parameters.S,2)\n cx,cy = chunkpos\n h = np.sum(hmap[cx:cx+m,cy:cy+m]) / (m*m)\n if h > parameters.VILLAGE_LEVEL:\n force_build_structure(game, imgs, chunk, chunkpos, n, type_)\n return n\n i += 1\n return 0", "def calculateChunkSize(size, record_count, splits):\n avg_record_size = size / record_count\n logging.info(\n \"Avg record size: %0.02f=%d/%d\" %\n (avg_record_size, size, record_count))\n chunk = floor(ceil(size / (splits * avg_record_size)) * avg_record_size)\n\n logging.info(\n \"Setting chunk to: %d=floor(ceil(%d/(%d*%0.02f))*%0.02d)\" %\n (chunk, size, splits, avg_record_size, avg_record_size))\n return chunk", "def reshape_as_blocks(data, block_size):\n data, block_size = _process_block_inputs(data, block_size)\n\n if np.any(np.mod(data.shape, block_size) != 0):\n raise ValueError(\n \"Each dimension of block_size must divide evenly \"\n \"into the corresponding dimension of data\"\n )\n\n nblocks = np.array(data.shape) // block_size\n new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)\n nblocks_idx = tuple(range(0, len(new_shape), 2)) # even indices\n block_idx = tuple(range(1, len(new_shape), 2)) # odd indices\n\n return data.reshape(new_shape).transpose(nblocks_idx + block_idx)", "def split_by_size(array: Iterable[T], size: int) -> Iterable[List[T]]:\n chunk = []\n for e in array:\n chunk.append(e)\n if len(chunk) == size:\n yield chunk\n chunk = []\n if len(chunk) > 0:\n yield chunk", "def split_chunk_iter(chunk, sizes, neighbors, rng=None):\n assert len(chunk) > len(sizes), f\"{len(chunk)} !> {len(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # start by drawing three random items\n splits = [[c] for c in rng.sample(list(chunk), len(sizes))]\n unused = set(chunk) - set(sum(splits, []))\n max_iters = max(sizes) * len(sizes) # worst case\n for j in range(max_iters):\n i = j % len(sizes)\n size = sizes[i]\n split = splits[i]\n if len(split) == size:\n continue\n # get all of the neighbors of the split\n candidates = set()\n for c in split:\n candidates |= neighbors[c]\n # filter to unused cubes\n candidates = candidates & unused\n if not candidates:\n return None\n # Pick a candidate at random and add it\n choice = rng.choice(list(candidates))\n split.append(choice)\n unused.remove(choice)\n return splits", "def ensure_chunk_size(da: xr.DataArray, **minchunks: int) -> xr.DataArray:\n if not uses_dask(da):\n return da\n\n all_chunks = dict(zip(da.dims, da.chunks))\n chunking = dict()\n for dim, minchunk in minchunks.items():\n chunks = all_chunks[dim]\n if minchunk 
== -1 and len(chunks) > 1:\n # Rechunk to single chunk only if it's not already one\n chunking[dim] = -1\n\n toosmall = np.array(chunks) < minchunk # Chunks that are too small\n if toosmall.sum() > 1:\n # Many chunks are too small, merge them by groups\n fac = np.ceil(minchunk / min(chunks)).astype(int)\n chunking[dim] = tuple(\n sum(chunks[i : i + fac]) for i in range(0, len(chunks), fac)\n )\n # Reset counter is case the last chunks are still too small\n chunks = chunking[dim]\n toosmall = np.array(chunks) < minchunk\n if toosmall.sum() == 1:\n # Only one, merge it with adjacent chunk\n ind = np.where(toosmall)[0][0]\n new_chunks = list(chunks)\n sml = new_chunks.pop(ind)\n new_chunks[max(ind - 1, 0)] += sml\n chunking[dim] = tuple(new_chunks)\n\n if chunking:\n return da.chunk(chunks=chunking)\n return da", "def __initialize_from_shape_and_chunk_size(self, shape: Tuple[int, ...], chunk_size: Tuple[int, ...]):\n collection_shape = tuple(\n shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape)))\n objects = np.empty(collection_shape, dtype=MultiDimensionalSlice)\n num_chunks = [shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape))]\n indexes = [range(num_chunks[i]) for i in range(len(shape))]\n\n for indx in product(*indexes):\n current_slice = tuple(\n slice(chunk_size[i] * indx[i], min(chunk_size[i] * (indx[i] + 1), shape[i])) for i in range(len(shape)))\n objects[indx] = MultiDimensionalSlice(indices=indx, slices=current_slice)\n self.__initialize_from_array(objects_array=objects)", "def split_into_subarrays_of_max_len(arr, max_len=44100):\n return np.split(arr, np.arange(max_len, len(arr), max_len))", "def chunks(array, size: int):\r\n for i in range(0, len(array), size):\r\n yield array[i:i + size]", "def get_blocks_shape(big_array, small_array):\n return tuple([int(b/s) for b, s in zip(big_array, small_array)])", "def get_chunks(vals, size):\n for i in range(0, len(vals), size):\n yield vals[i:i + size]", "def window_blocks(large_array, window_size):\n y_size = large_array.shape[0]/window_size\n blocks_array = large_array.reshape(y_size, window_size)\n return blocks_array", "def test_chunk_size_priority_over_n_splits(self):\n with self.subTest(input='list', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=1, n_splits=6, n_jobs=None), 13)\n with self.subTest(input='numpy', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=1, n_splits=6,\n n_jobs=None), 100)\n\n with self.subTest(input='list', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=3, n_splits=3, n_jobs=None), 5)\n with self.subTest(input='numpy', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=3, n_splits=3,\n n_jobs=None), 34)", "def _chunkify(arr, size):\n arrs = []\n for i in range(0, len(arr), size):\n chunk = bytearray(arr[i:i + size])\n arrs.append(chunk)\n return arrs", "def rand_aligned_slices(maxdim=5, maxshape=16):\n ndim = randrange(1, maxdim+1)\n minshape = 2\n n = randrange(100)\n if n >= 95:\n minshape = 0\n elif n >= 90:\n minshape = 1\n all_random = True if randrange(100) >= 80 else False\n lshape = [0]*ndim; rshape = [0]*ndim\n lslices = [0]*ndim; rslices = [0]*ndim\n\n for n in range(ndim):\n small = randrange(minshape, maxshape+1)\n big = randrange(minshape, maxshape+1)\n if big < small:\n big, small = 
small, big\n\n # Create a slice that fits the smaller value.\n if all_random:\n start = randrange(-small, small+1)\n stop = randrange(-small, small+1)\n step = (1,-1)[randrange(2)] * randrange(1, small+2)\n s_small = slice(start, stop, step)\n _, _, _, slicelen = slice_indices(s_small, small)\n else:\n slicelen = randrange(1, small+1) if small > 0 else 0\n s_small = randslice_from_slicelen(slicelen, small)\n\n # Create a slice of the same length for the bigger value.\n s_big = randslice_from_slicelen(slicelen, big)\n if randrange(2) == 0:\n rshape[n], lshape[n] = big, small\n rslices[n], lslices[n] = s_big, s_small\n else:\n rshape[n], lshape[n] = small, big\n rslices[n], lslices[n] = s_small, s_big\n\n return lshape, rshape, tuple(lslices), tuple(rslices)", "def _calculate_step_sizes(x_size, y_size, num_chunks):\n # First we try to split only along fast x axis\n xstep = max(1, int(x_size / num_chunks))\n\n # More chunks are needed only if xstep gives us fewer chunks than\n # requested.\n x_chunks = int(x_size / xstep)\n\n if x_chunks >= num_chunks:\n ystep = y_size\n else:\n # The x and y loops are nested, so the number of chunks\n # is multiplicative, not additive. Calculate the number\n # of y chunks we need to get at num_chunks.\n y_chunks = int(num_chunks / x_chunks) + 1\n ystep = max(1, int(y_size / y_chunks))\n\n return xstep, ystep", "def get_chunk_slices(ds_dim, chunk_size):\n chunks = list(range(0, ds_dim, chunk_size))\n if chunks[-1] < ds_dim:\n chunks.append(ds_dim)\n else:\n chunks[-1] = ds_dim\n\n chunks = list(zip(chunks[:-1], chunks[1:]))\n\n return chunks", "def test_batch_size_pack_size():", "def get_chunks_ranges(\n total: int, *, chunk_size: int = None, parts: int = None\n) -> List[Tuple[int, int]]:\n\n assert (chunk_size is not None) ^ (\n parts is not None\n ), \"Exactly one of chunk_size or parts must be provided\"\n\n if chunk_size is not None:\n if chunk_size >= total:\n return [(0, total)]\n\n steps = np.arange(0, total, chunk_size, dtype=np.int64)\n ans = list(zip(steps[:-1], steps[1:]))\n if ans[-1][-1] < total:\n ans.append((ans[-1][-1], total))\n ans[-1] = (ans[-1][0], min(ans[-1][-1], total))\n return ans\n\n elif parts is not None:\n chunk_size = np.ceil(total / parts)\n return get_chunks_ranges(total, chunk_size=chunk_size)\n\n assert False, \"should not reach here\"", "def chunk(\n self,\n shape: Tuple[int],\n split: int,\n rank: int = None,\n w_size: int = None,\n sparse: bool = False,\n ) -> Tuple[int, Tuple[int], Tuple[slice]]:\n # ensure the split axis is valid, we actually do not need it\n split = sanitize_axis(shape, split)\n if split is None:\n return 0, shape, tuple(slice(0, end) for end in shape)\n rank = self.rank if rank is None else rank\n w_size = self.size if w_size is None else w_size\n if not isinstance(rank, int) or not isinstance(w_size, int):\n raise TypeError(\"rank and size must be integers\")\n\n dims = len(shape)\n size = shape[split]\n chunk = size // w_size\n remainder = size % w_size\n\n if remainder > rank:\n chunk += 1\n start = rank * chunk\n else:\n start = rank * chunk + remainder\n end = start + chunk\n\n if sparse:\n return start, end\n\n return (\n start,\n tuple(shape[i] if i != split else end - start for i in range(dims)),\n tuple(slice(0, shape[i]) if i != split else slice(start, end) for i in range(dims)),\n )", "def rand_aligned_slices(maxdim=5, maxshape=16):\n ndim = randrange(1, maxdim + 1)\n minshape = 2\n n = randrange(100)\n if n >= 95:\n minshape = 0\n elif n >= 90:\n minshape = 1\n all_random = True if 
randrange(100) >= 80 else False\n lshape = [0] * ndim\n rshape = [0] * ndim\n lslices = [0] * ndim\n rslices = [0] * ndim\n for n in range(ndim):\n small = randrange(minshape, maxshape + 1)\n big = randrange(minshape, maxshape + 1)\n if big < small:\n big, small = small, big\n if all_random:\n start = randrange(-small, small + 1)\n stop = randrange(-small, small + 1)\n step = (1, -1)[randrange(2)] * randrange(1, small + 2)\n s_small = slice(start, stop, step)\n _, _, _, slicelen = slice_indices(s_small, small)\n else:\n slicelen = randrange(1, small + 1) if small > 0 else 0\n s_small = randslice_from_slicelen(slicelen, small)\n s_big = randslice_from_slicelen(slicelen, big)\n if randrange(2) == 0:\n rshape[n], lshape[n] = big, small\n rslices[n], lslices[n] = s_big, s_small\n else:\n rshape[n], lshape[n] = small, big\n rslices[n], lslices[n] = s_small, s_big\n return lshape, rshape, tuple(lslices), tuple(rslices)", "def build_chunks(read_bytes, file_size, chunk_size):\n\n chunks = []\n\n index = 0\n start = 0\n\n while start < file_size:\n end = min(start + chunk_size, file_size)\n size = end - start\n\n chunk = FileChunk(index, size, partial(read_bytes, start, size))\n chunks.append(chunk)\n\n index += 1\n start += chunk_size\n\n return chunks", "def split_chunks(\n key: core.ChunkKey,\n dataset: xarray.Dataset,\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n # This function splits consolidated arrays into blocks of new sizes, e.g.,\n # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉\n # X = |x_10 x_11 ...| = ||x_10| |x_11| ...|\n # |x_20 x_21 ...| |⌊x_20⌋ ⌊x_21⌋ ...|\n # ⌊ ... ... ...⌋ ⌊ ... ... ...⌋\n # and emits them as (ChunkKey, xarray.Dataset) pairs.\n all_bounds = []\n for dim, chunk_size in target_chunks.items():\n start = key.get(dim, 0)\n stop = start + dataset.sizes[dim]\n all_bounds.append(_split_chunk_bounds(start, stop, chunk_size))\n\n for bounds in itertools.product(*all_bounds):\n offsets = dict(key)\n slices = {}\n for dim, (start, stop) in zip(target_chunks, bounds):\n base = key.get(dim, 0)\n offsets[dim] = start\n slices[dim] = slice(start - base, stop - base)\n\n new_key = core.ChunkKey(offsets)\n new_chunk = dataset.isel(slices)\n yield new_key, new_chunk", "def get_chunks(self, chunk_size, max_chunks=None):\n from .helpers import get_chunks as chunker\n return chunker(self, chunk_size, max_chunks=max_chunks)", "def getMyChunkSize(numJobs, numWorkers, chunkSize, rank):\n assert(numJobs >= numWorkers)\n allJobs = np.arange(numJobs)\n startInd = (np.arange(numWorkers)) * chunkSize\n endInd = (np.arange(numWorkers) + 1) * chunkSize\n endInd[-1] = numJobs\n myJobs = allJobs[startInd[rank]:endInd[rank]]\n return myJobs", "def iter_chunks(iterable, size):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, size))\n if len(chunk) == 0:\n break\n yield chunk", "def max_chunk_size(self):\n return min(constants.MAX_CHUNK_SIZE, self._maxdata // 2) or constants.MAX_PUSH_DATA", "def _nonoverlapping_chunks(sig, n_samples):\n\n n_chunks = int(np.floor(len(sig) / float(n_samples)))\n chunks = np.reshape(sig[:int(n_chunks * n_samples)], (n_chunks, int(n_samples)))\n\n return chunks", "def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))", "def match_chunks(*arrays):\n target = 
arrays[0].datashape\n result = []\n for a in arrays:\n ds = a.datashape\n for i, j in zip(reversed(list(range(a.ndim))),\n reversed(list(range(target.ndim)))):\n ds = change_axis_schema(ds, i, chunk=target.chunk_size[j],\n overlap=target.chunk_overlap[j])\n if a.datashape.schema != ds.schema:\n a = a.redimension(ds.schema)\n result.append(a)\n\n return tuple(result)", "def _get_chunk_patch_info(\n img_shape, chunk_input_shape, patch_input_shape, patch_output_shape\n):\n round_to_multiple = lambda x, y: np.floor(x / y) * y\n patch_diff_shape = patch_input_shape - patch_output_shape\n\n chunk_output_shape = chunk_input_shape - patch_diff_shape\n chunk_output_shape = round_to_multiple(\n chunk_output_shape, patch_output_shape\n ).astype(np.int64)\n chunk_input_shape = (chunk_output_shape + patch_diff_shape).astype(np.int64)\n\n patch_input_tl_list, _ = _get_patch_top_left_info(\n img_shape, patch_input_shape, patch_output_shape\n )\n patch_input_br_list = patch_input_tl_list + patch_input_shape\n patch_output_tl_list = patch_input_tl_list + patch_diff_shape\n patch_output_br_list = patch_output_tl_list + patch_output_shape\n patch_info_list = np.stack(\n [\n np.stack([patch_input_tl_list, patch_input_br_list], axis=1),\n np.stack([patch_output_tl_list, patch_output_br_list], axis=1),\n ],\n axis=1,\n )\n\n chunk_input_tl_list, _ = _get_patch_top_left_info(\n img_shape, chunk_input_shape, chunk_output_shape\n )\n chunk_input_br_list = chunk_input_tl_list + chunk_input_shape\n # * correct the coord so it stay within source image\n y_sel = np.nonzero(chunk_input_br_list[:, 0] > img_shape[0])[0]\n x_sel = np.nonzero(chunk_input_br_list[:, 1] > img_shape[1])[0]\n chunk_input_br_list[y_sel, 0] = (\n img_shape[0] - patch_diff_shape[0]\n ) - chunk_input_tl_list[y_sel, 0]\n chunk_input_br_list[x_sel, 1] = (\n img_shape[1] - patch_diff_shape[1]\n ) - chunk_input_tl_list[x_sel, 1]\n chunk_input_br_list[y_sel, 0] = round_to_multiple(\n chunk_input_br_list[y_sel, 0], patch_output_shape[0]\n )\n chunk_input_br_list[x_sel, 1] = round_to_multiple(\n chunk_input_br_list[x_sel, 1], patch_output_shape[1]\n )\n chunk_input_br_list[y_sel, 0] += chunk_input_tl_list[y_sel, 0] + patch_diff_shape[0]\n chunk_input_br_list[x_sel, 1] += chunk_input_tl_list[x_sel, 1] + patch_diff_shape[1]\n chunk_output_tl_list = chunk_input_tl_list + patch_diff_shape // 2\n chunk_output_br_list = chunk_input_br_list - patch_diff_shape // 2 # may off pixels\n chunk_info_list = np.stack(\n [\n np.stack([chunk_input_tl_list, chunk_input_br_list], axis=1),\n np.stack([chunk_output_tl_list, chunk_output_br_list], axis=1),\n ],\n axis=1,\n )\n\n return chunk_info_list, patch_info_list", "def group_each_into_chunks(\n chunk_size: int,\n) -> Callable[[Iterable[T]], Generator[T, None, None]]:\n\n def _group_each_into_chunks(arr):\n yield from group_each_until(\n lambda g: len(g) >= chunk_size and len(g), yield_final=True\n )(arr)\n\n return _group_each_into_chunks", "def normalize_chunks(\n chunks: Mapping[str, Union[int, Tuple[int, ...]]],\n dim_sizes: Mapping[str, int],\n) -> Dict[str, int]:\n if not chunks.keys() <= dim_sizes.keys():\n raise ValueError(\n 'all dimensions used in chunks must also have an indicated size: '\n f'chunks={chunks} vs dim_sizes={dim_sizes}')\n result = {}\n for dim, size in dim_sizes.items():\n if dim not in chunks:\n result[dim] = size\n elif isinstance(chunks[dim], tuple):\n unique_chunks = set(chunks[dim])\n if len(unique_chunks) != 1:\n raise ValueError(\n f'chunks for dimension {dim} are not constant: 
{unique_chunks}',\n )\n result[dim], = unique_chunks\n elif chunks[dim] == -1:\n result[dim] = size\n else:\n result[dim] = chunks[dim]\n return result", "def get_chunks(sequence, chunk_size):\n seq_length = len(sequence)\n seq_list = []\n treshold = int(seq_length) // int(chunk_size)\n if treshold <4:\n raise ValueError(\"Change chunk size\")\n for i in range(treshold):\n seq = sequence[i*chunk_size:(i+1)*chunk_size]\n seq_list.append(seq)\n return seq_list", "def chunk(flat, sizes):\n iter_flat = iter(flat)\n yield from (list(islice(iter_flat, 0, size)) for size in sizes)", "def block_sizes(max_size):\n if max_size > 8:\n raise ValueError(\"Invalid max_size value specified!\")\n else:\n return [f\"{2**x}x{2**y}\" for x in range(2, max_size) for y in range(2, max_size) if x != 2 or y != 2]", "def test_large_chunk(self):\n chunksize = 7 * (1024 ** 2)\n size = 8 * (1024 ** 3)\n self.assertEqual(find_chunksize(size, chunksize), chunksize * 2)", "def rechunk(array, chunk_size=None, chunk_overlap=None):\n\n # deal with chunk sizes\n ds = array.datashape.copy()\n if chunk_size is None:\n chunk_size = ds.chunk_size\n if isinstance(chunk_size, int):\n chunk_size = [chunk_size] * ds.ndim\n ds.chunk_size = chunk_size\n\n if chunk_overlap is None:\n chunk_overlap = ds.chunk_overlap\n if isinstance(chunk_overlap, int):\n chunk_overlap = [chunk_overlap] * ds.ndim\n ds.chunk_overlap = chunk_overlap\n\n if ds != array.datashape:\n array = array.redimension(ds.schema)\n return array", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def test_create_chunks():\n items = list(range(0, 100))\n size = 3\n\n chunks = create_chunks(items, size)\n\n current = next(chunks)\n assert len(current) == size\n assert current == [0, 1, 2]\n\n current = next(chunks)\n assert current == [3, 4, 5]", "def torch_big_sample(array, indexes, desired_shape):\n torch_arr = torch.tensor(array, dtype=torch.float32)\n indexed = torch_arr[[indexes[0], indexes[1]]]\n return indexed.reshape(desired_shape)\n #chunked = torch.chunk(indexed, desired_shape[0])\n #chunked = [chunk.reshape(desired_shape[1:]) for chunk in chunked]\n #out = torch.stack(chunked)", "def variable_axis_size_partitioner(\n max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None):\n if max_shard_bytes < 1 or bytes_per_string_element < 1:\n raise ValueError(\n \"Both max_shard_bytes and bytes_per_string_element must be positive. 
\"\n f\"Currently, max_shard_bytes is {max_shard_bytes} and\"\n f\"bytes_per_string_element is {bytes_per_string_element}\")\n if max_shards and max_shards < 1:\n raise ValueError(\n \"max_shards must be positive.\")\n\n def _partitioner(shape, dtype):\n \"\"\"Partitioner that partitions shards to have max_shard_bytes total size.\n\n Args:\n shape: A `TensorShape`.\n dtype: A `DType`.\n\n Returns:\n A tuple representing how much to slice each axis in shape.\n\n Raises:\n ValueError: If shape is not a fully defined `TensorShape` or dtype is not\n a `DType`.\n \"\"\"\n if not isinstance(shape, tensor_shape.TensorShape):\n raise ValueError(f\"shape is not a TensorShape: {shape}\")\n if not shape.is_fully_defined():\n raise ValueError(f\"shape is not fully defined: {shape}\")\n if not isinstance(dtype, dtypes.DType):\n raise ValueError(f\"dtype is not a DType: {dtype}\")\n\n if dtype.base_dtype == dtypes.string:\n element_size = bytes_per_string_element\n else:\n element_size = dtype.size\n\n partitions = [1] * shape.ndims\n bytes_per_slice = 1.0 * (\n shape.num_elements() / shape.dims[axis].value) * element_size\n # How many slices can we fit on one shard of size at most max_shard_bytes?\n # At least one slice is required.\n slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n # How many shards do we need for axis given that each shard fits\n # slices_per_shard slices from a total of shape[axis] slices?\n axis_shards = int(math.ceil(\n 1.0 * shape.dims[axis].value / slices_per_shard))\n if max_shards:\n axis_shards = min(max_shards, axis_shards)\n\n partitions[axis] = axis_shards\n\n return partitions\n\n return _partitioner", "def getMyChunkSize(numJobs, numWorkers, chunkSize, rank):\n print \"numJobs, numWorkers: \", numJobs, numWorkers, chunkSize\n assert(numJobs >= numWorkers)\n allJobs = np.arange(numJobs)\n startInd = (np.arange(numWorkers)) * chunkSize\n endInd = (np.arange(numWorkers) + 1) * chunkSize\n endInd[-1] = numJobs\n myJobs = allJobs[startInd[rank]:endInd[rank]]\n return myJobs", "def split_or_chunk(inputs, num_chunks_or_sections, dim=0):\n def split_map(obj):\n if isinstance(obj, torch.Tensor):\n if isinstance(num_chunks_or_sections, int):\n return torch.chunk(obj, num_chunks_or_sections, dim=dim)\n else:\n return torch.split(obj, num_chunks_or_sections, dim=dim)\n if isinstance(obj, tuple) and obj:\n return list(zip(*map(split_map, obj)))\n if isinstance(obj, list) and obj:\n return list(map(list, zip(*map(split_map, obj))))\n if isinstance(obj, dict) and obj:\n return list(map(type(obj), zip(*map(split_map, obj.items()))))\n if isinstance(num_chunks_or_sections, int):\n return [obj for chunk in range(num_chunks_or_sections)]\n else:\n return [obj for chunk in num_chunks_or_sections]\n\n # After split_map is called, a split_map cell will exist. This cell\n # has a reference to the actual function split_map, which has references\n # to a closure that has a reference to the split_map cell (because the\n # fn is recursive). To avoid this reference cycle, we set the function to\n # None, clearing the cell\n try:\n return split_map(inputs)\n finally:\n split_map = None", "def test_generate_batches_from_1d_array_with_dividable_batch_size(\n array,\n batch_size,\n expected):\n gen = BatchGenerator(array, batch_size=batch_size)\n\n actual = gen.drain()\n\n assert actual == expected", "def pack_unpack_hard():\n # Array is apprx. 
1.5 GB large\n # should make apprx 1536 chunks\n pack_unpack(100, chunk_size=reverse_pretty('1M'), progress=simple_progress)", "def rechunking_plan(\n dim_sizes: Mapping[str, int],\n source_chunks: Mapping[str, int],\n target_chunks: Mapping[str, int],\n itemsize: int,\n max_mem: int,\n) -> List[Dict[str, int]]:\n plan_shapes = algorithm.rechunking_plan(\n shape=tuple(dim_sizes.values()),\n source_chunks=tuple(source_chunks[dim] for dim in dim_sizes),\n target_chunks=tuple(target_chunks[dim] for dim in dim_sizes),\n itemsize=itemsize,\n max_mem=max_mem,\n )\n return [dict(zip(dim_sizes.keys(), shapes)) for shapes in plan_shapes]", "def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the data across the bsz batches.\n data = data.reshape(batch_size, -1).transpose()\n # if torch.cuda.is_available():\n # data = data.cuda()\n return data", "def create_chunker(\n *,\n string_max_bytes: int,\n encoding: CharacterEncoding = CharacterEncoding.UTF8,\n max_chunk_value: Optional[int] = None,\n dtype: tf.dtypes.DType = _DEFAULT_DTYPE,\n) -> Chunker:\n if encoding == CharacterEncoding.UTF8:\n return UTF8Chunker(\n string_max_bytes=string_max_bytes,\n max_chunk_value=max_chunk_value,\n dtype=dtype,\n )\n if encoding == CharacterEncoding.UNKNOWN:\n return BinaryChunker(\n string_max_bytes=string_max_bytes,\n max_chunk_value=max_chunk_value,\n dtype=dtype,\n )\n\n raise ValueError(f'Unsupported encoding: {encoding}')", "def get_chunks(list_object, chunk_size):\n size = len(list_object)\n if size <= chunk_size:\n yield list_object\n else:\n chunks_nb = math.ceil(size / chunk_size)\n iter_ints = range(0, chunks_nb)\n for i in iter_ints:\n j = i * chunk_size\n if i + 1 < chunks_nb:\n k = j + chunk_size\n yield list_object[max(j - 1, 0):k]\n else:\n yield list_object[max(j - 1, 0):]", "def chunks(data, overrides = {}):\n counter, filesize = 0, len(data)\n last = None\n while counter < filesize:\n try:\n magic, size = chunk.unpack_from(data, counter)\n except struct_error as e:\n print('failed loading chunk from', data[:counter])\n print('last chunk:', last)\n raise e\n\n counter += chunk.size\n contents = data[counter:counter+size]\n\n if magic[3] != 0x4D:\n raise Exception('bad magic', magic, 'last chunk:', last)\n\n if magic in overrides:\n size = overrides[magic]\n\n yield magic, size, contents\n counter += size\n\n last = (magic, size, contents)", "def chunks(iterable, size):\n it = iter(iterable)\n chunk = tuple(itertools.islice(it, size))\n while chunk:\n yield chunk\n chunk = tuple(itertools.islice(it, size))", "def create_batches(data_size, batch_size, shuffle=True):\r\n batches = []\r\n ids = list(range(data_size))\r\n if shuffle:\r\n random.shuffle(ids)\r\n for i in range(int(data_size / batch_size)):\r\n start = i * batch_size\r\n end = (i + 1) * batch_size\r\n batches.append(ids[start:end])\r\n # the batch of which the length is less than batch_size\r\n rest = data_size % batch_size\r\n if rest > 0:\r\n batches.append(list(ids[-rest:]) + [-1] * (batch_size - rest)) # -1 as padding\r\n return batches", "def num_47():\n\n def block_reshape(a, rows, cols, nodata=-1, as_masked=True):\n \"\"\" \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape\n pad = ((0, ypad), (0, xpad))\n p_with 
=((nodata, nodata), (nodata, nodata))\n b = np.pad(a, pad_width=pad, mode='constant', constant_values=p_with)\n w_y, w_x = w # Blocksize\n y, x = b.shape # padded array\n c = b.reshape((y//w_y, w_y, x//w_x, w_x))\n c = c.swapaxes(1, 2).reshape(-1, w_y, w_x)\n if as_masked:\n mask_val = nodata\n c = np.ma.masked_equal(c, mask_val)\n c.set_fill_value(mask_val)\n return b, c\n y, x = 5, 6\n rows, cols = [3, 4]\n nodata = -1\n a = np.arange(x*y).reshape(y,x)\n b, c = block_reshape(a, rows, cols, nodata)\n print(\"\\n{}\".format(num_47.__doc__))\n print(\"a\\n{}\\nb\\n{}\\nc\\n{}\".format(a, b, c))\n return a, b, c", "def iter_chunks(chunksize, *iterables):\n iterables = iter(zip(*iterables))\n\n while 1:\n chunk = tuple(islice(iterables, chunksize))\n\n if not chunk:\n return\n\n yield chunk", "def chunk(max_elems = 8192, dtype = numpy.float64):\n\n @filters\n def _dagpype_internal_fn_act(target):\n assert max_elems > 0\n dtype_ = dtype\n\n l = []\n try:\n while True:\n while len(l) < max_elems:\n l.append((yield))\n target.send(numpy.array(l, dtype = dtype_))\n l = []\n except GeneratorExit:\n if len(l) > 0:\n target.send(numpy.array(l, dtype = dtype_)) \n \n return _dagpype_internal_fn_act", "def _get_split_sizes(self, n_examples):\n\n min_ex = (int(n_examples // self.n_splits)\n * np.ones(self.n_splits, dtype=np.int8))\n \n rem = np.array(\n [1 if i < n_examples % self.n_splits else 0\n for i in range(self.n_splits)],\n dtype=np.int8)\n\n return np.add(min_ex, rem)", "def pack_unpack_extreme():\n # this will create a huge array, and then use the\n # blosc.BLOSC_MAX_BUFFERSIZE as chunk-szie\n pack_unpack(300, chunk_size=blosc.BLOSC_MAX_BUFFERSIZE,\n progress=simple_progress)", "def chunk(self, shape, split) -> NotImplementedError:\n raise NotImplementedError()", "def get_shape_for_tile_split(\n arr_height: int, arr_width: int, nchannels: int, tile_height: int, tile_width: int\n) -> list[int]:\n shape = [\n arr_height // tile_height,\n tile_height,\n arr_width // tile_width,\n tile_width,\n ]\n if nchannels > 1:\n shape.append(nchannels)\n return shape", "def get_num_chunks(self) -> int:", "def batch_by_size(iterable, max_buffer=20000):\n all_batches = []\n current_batch = []\n current_size = 0\n\n for next_item in iterable:\n # An approximated way to determine size\n next_size = len(str(next_item))\n expected_total_size = current_size + next_size\n\n if next_size > max_buffer:\n raise BufferExceedError('Buffer exceeded')\n\n elif expected_total_size > max_buffer:\n # If expected to exceed max size, then current batch is finalized\n all_batches.append(current_batch)\n current_batch = [next_item]\n current_size = next_size\n\n else:\n # Else add current set of instructions to current batch\n current_batch.append(next_item)\n current_size = expected_total_size\n\n # Group remaining instructions as a single batch\n if len(current_batch) > 0:\n all_batches.append(current_batch)\n\n return all_batches", "def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)", "def create_batches(data_size, batch_size, shuffle=True):\r\n batches = []\r\n ids = list(range(data_size))\r\n if shuffle:\r\n random.shuffle(ids)\r\n for i in range(int(data_size / batch_size)):\r\n start = i * batch_size\r\n end = (i + 1) * batch_size\r\n batches.append(ids[start:end])\r\n rest = data_size % batch_size\r\n if rest > 0:\r\n batches.append(ids[-rest:] + [-1] * (batch_size - rest)) # -1 as padding\r\n return batches", "def get_chunks(sequence, chunk_size):\n segments = []\n for i in 
range(0, len(sequence), chunk_size):\n tmp = sequence[i:chunk_size+i]\n if len(tmp) == chunk_size:\n segments.append(tmp)\n return segments", "def split_audio_into_chunks(sampling_rate, amplitude_vector, chunk_size):\n \n col_size = int(chunk_size / ((1 / sampling_rate) * 1000))\n whole = int(len(amplitude_vector) / col_size)\n first_partition_index = whole*col_size\n first_partition = amplitude_vector[:first_partition_index]\n second_partition = amplitude_vector[first_partition_index:]\n return first_partition.reshape((whole, col_size)), second_partition", "def test_chunk_memory(self):\n layer = tl.Serial(tl.Dense(1024*1024), tl.Dense(128))\n chunked = tl.Chunk(layer, 256)\n x = np.random.uniform(size=(16*1024, 16))\n chunked.init(shapes.signature(x))\n y = chunked(x)\n z = tl.Accelerate(chunked)(x)\n self.assertEqual(y.shape, (16*1024, 128))\n self.assertEqual(z.shape, (16*1024, 128))", "def _partitioner(shape, dtype):\n if not isinstance(shape, tensor_shape.TensorShape):\n raise ValueError(f\"shape is not a TensorShape: {shape}\")\n if not shape.is_fully_defined():\n raise ValueError(f\"shape is not fully defined: {shape}\")\n if not isinstance(dtype, dtypes.DType):\n raise ValueError(f\"dtype is not a DType: {dtype}\")\n\n if dtype.base_dtype == dtypes.string:\n element_size = bytes_per_string_element\n else:\n element_size = dtype.size\n\n partitions = [1] * shape.ndims\n bytes_per_slice = 1.0 * (\n shape.num_elements() / shape.dims[axis].value) * element_size\n # How many slices can we fit on one shard of size at most max_shard_bytes?\n # At least one slice is required.\n slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n # How many shards do we need for axis given that each shard fits\n # slices_per_shard slices from a total of shape[axis] slices?\n axis_shards = int(math.ceil(\n 1.0 * shape.dims[axis].value / slices_per_shard))\n if max_shards:\n axis_shards = min(max_shards, axis_shards)\n\n partitions[axis] = axis_shards\n\n return partitions", "def get_chunks(self, data, scale=1):\r\n x_chunks, y_chunks = [(0, self.rows)], [(0, self.cols)]\r\n if data.shape[0] > self.rows:\r\n x_chunks = self.perform_chunking(data.shape[0], self.rows)\r\n else:\r\n x_chunks = [(0, data.shape[0])]\r\n if data.shape[1] > self.cols:\r\n y_chunks = self.perform_chunking(data.shape[1], self.cols)\r\n else:\r\n y_chunks = [(0, data.shape[1])]\r\n return x_chunks, y_chunks", "def chunkize_serial(iterable, chunksize, as_numpy=False, dtype=np.float32):\n it = iter(iterable)\n while True:\n if as_numpy:\n # convert each document to a 2d numpy array (~6x faster when transmitting\n # chunk data over the wire, in Pyro)\n wrapped_chunk = [[np.array(doc, dtype=dtype) for doc in itertools.islice(it, int(chunksize))]]\n else:\n wrapped_chunk = [list(itertools.islice(it, int(chunksize)))]\n if not wrapped_chunk[0]:\n break\n # memory opt: wrap the chunk and then pop(), to avoid leaving behind a dangling reference\n yield wrapped_chunk.pop()", "def estimate_size(shape):\n total_bytes = reduce(np.multiply, shape) * 8\n return total_bytes / 1E6", "def AllBipartitionsAllSizes(data):\n # 1) FIND ALL THE COMBINATIONS UP TO LENGTH < len(data)/2\n bipartitions = []\n for n in range(1, Nmax):\n # 1.1) Find all combinations of given size out of dataset\n combinations = [c for c in itertools.combinations(data, n)]\n # 1.2) Sort and find complementary sets\n for comb in combinations:\n complementary = tuple(setdata - set(comb))\n bipartitions.append((comb, complementary))\n\n # 2) FIND AND SORT THE 
BIPARTITIONS OF SIZE Nmax\n # 2.1) Find all combinations of size Nmax\n combinations = [c for c in itertools.combinations(data, Nmax)]\n # 2.2) Sort and find complementary sets\n ncombs = len(combinations)\n # Ignore repeated combinations if both subsets are of size = Nmax\n if iseven: ncombs = ncombs // 2\n\n for i in range(ncombs):\n comb = combinations[i]\n combset = set(comb)\n complementary = setdata - combset\n bipartitions.append((comb, tuple(complementary)))\n\n return bipartitions", "def get_chunks(sequence, window_size, step=1):\n # get the sequence length\n k = len(sequence)\n # get the index for each end and chunk\n for i in range(0, k - window_size + 1, step):\n # generate the end of the window\n end = i + window_size\n # get the slice of the sequence\n chunk = sequence[i:i + window_size]\n # assure the the chunk is the expected size\n assert len(chunk) == window_size\n yield chunk, end", "def test_super_chunk(self):\n chunksize = MAX_SINGLE_UPLOAD_SIZE + 1\n size = MAX_SINGLE_UPLOAD_SIZE * 2\n self.assertEqual(find_chunksize(size, chunksize),\n MAX_SINGLE_UPLOAD_SIZE)", "def get_chunks(sequence, window_size, step=1):\n k = len(sequence)\n for i in range(0, k - window_size + 1, step):\n end = i + window_size\n chunk = sequence[i:i + window_size]\n assert len(chunk) == window_size\n yield chunk, end", "def _chunk_windows(windows, num_chunks):\n if num_chunks <= 0 or int(num_chunks) != num_chunks:\n raise ValueError(\"Number of chunks must be an integer > 0\")\n num_chunks = min(len(windows) - 1, num_chunks)\n splits = np.array_split(windows[:-1], num_chunks)\n chunks = []\n for j in range(num_chunks - 1):\n chunk = np.append(splits[j], splits[j + 1][0])\n chunks.append(chunk)\n chunk = np.append(splits[-1], windows[-1])\n chunks.append(chunk)\n return chunks", "def fit(blocks, size):\r\n assert(len(blocks) > 0)\r\n assert(size >= min_width(blocks))\r\n if len(blocks) == 1:\r\n return [grules.EMPTY * i + grules.FILLED * blocks[0] + grules.EMPTY * (size - blocks[0] - i) \\\r\n for i in range(size - blocks[0] + 1)]\r\n else:\r\n return [grules.EMPTY * (i - blocks[0]) + grules.FILLED * blocks[0] + grules.EMPTY + f2 \\\r\n for i in range(blocks[0], size - min_width(blocks[1:])) \\\r\n for f2 in fit(blocks[1:], size - i - 1)]", "def chunk(size, seq):\n if not isinstance(size, int) or size <= 0: # pragma: no cover\n raise ValueError(\"size must be an integer greater than zero\")\n\n group = []\n\n for item in seq:\n if len(group) >= size:\n yield group\n group = []\n group.append(item)\n\n if group:\n yield group", "def next_chunk(self):\n\n def sym_base_handler(base):\n l.warning(\"A computed chunk base is symbolic; maximizing it\")\n return self.state.solver.max_int(base)\n\n base = concretize(self.base + self.get_size(), self.state.solver, sym_base_handler)\n if base >= self.heap.heap_base + self.heap.heap_size - 2 * self._chunk_size_t_size:\n return None\n else:\n return PTChunk(base, self.state)", "def generate_block_sizes(n_features, blocks, shorten_last_block=False):\n # If blocks is an int, divide in blocks of equal size.\n if isinstance(blocks, int):\n if n_features % blocks != 0 and not shorten_last_block:\n raise ValueError('The parameter \"n_features\" must be '\n f'divisible by \"blocks\" ({blocks})')\n\n div, mod = divmod(n_features, blocks)\n blocks = [blocks] * div\n if mod != 0:\n blocks += [mod]\n elif n_features != sum(blocks):\n raise ValueError('The sum of the block sizes must be equal to '\n f'\"n_features\" ({n_features}).')\n\n return blocks", "def 
_make_chunk_size(self, req_size):\n size = req_size\n size += 2 * self._chunk_size_t_size # Two size fields\n size = self._chunk_min_size if size < self._chunk_min_size else size\n if size & self._chunk_align_mask: # If the chunk would not be aligned\n size = (size & ~self._chunk_align_mask) + self._chunk_align_mask + 1 # Fix it\n return size", "def _align_chunk_info(chunk_info):\n max_dumps = max(info['shape'][0] for info in chunk_info.values())\n for key, info in chunk_info.items():\n shape = info['shape']\n n_dumps = shape[0]\n if n_dumps < max_dumps:\n info['shape'] = (max_dumps,) + shape[1:]\n # We could just add a single new chunk, but that could cause an\n # inconveniently large chunk if there is a big difference between\n # n_dumps and max_dumps.\n time_chunks = info['chunks'][0] + (max_dumps - n_dumps) * (1,)\n info['chunks'] = (time_chunks,) + info['chunks'][1:]\n logger.debug('Adding %d phantom dumps to array %s', max_dumps - n_dumps, key)\n return chunk_info", "def get_chunks(self,file_size):\n chunk_start = 0\n chunk_size = 0xA00000 # 10485760 bytes, default max ssl buffer size\n while chunk_start + chunk_size <= file_size:\n yield(chunk_start, chunk_size)\n chunk_start += chunk_size\n final_chunk_size = file_size - chunk_start\n yield(chunk_start, final_chunk_size)", "def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]", "def gen_shape(size):\n\n def _factors(n):\n \"\"\" Returns the divisors of n\n\n >>> _factors(4)\n {1, 2, 4}\"\"\"\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))\n\n assert size > 0\n if size == 1:\n return (1,)\n\n shape = []\n rem = int(size / np.prod(shape))\n while rem > 1:\n if len(shape) > 6:\n shape.append(rem)\n break\n\n shape.append(np.random.choice(list(_factors(rem))))\n rem = int(size / np.prod(shape))\n\n return tuple(int(i) for i in shape)", "def get_arraymax_section_data(key_list, max_size=1300000000):\n result_list = []\n data_key_list = DATA_BLOCK_SPEC.keys()\n for key in key_list:\n if key in data_key_list:\n result_list.append(max_size)\n else:\n result_list.append(0)\n return np.array(result_list)", "def test_small_chunk(self):\n chunksize = 7 * (1024 ** 2)\n size = 8 * (1024 ** 2)\n self.assertEqual(find_chunksize(size, chunksize), chunksize)" ]
[ "0.644044", "0.629798", "0.61731964", "0.60461324", "0.598301", "0.5948597", "0.59229267", "0.5806321", "0.57971877", "0.5785244", "0.57549477", "0.56843406", "0.5641695", "0.5637497", "0.56076527", "0.5582129", "0.55698544", "0.556121", "0.55578834", "0.5544379", "0.5536151", "0.5526536", "0.55035055", "0.5464144", "0.5453769", "0.5418642", "0.5414757", "0.5408439", "0.53291434", "0.53081846", "0.5299798", "0.5293532", "0.52705276", "0.5258062", "0.5239482", "0.5238444", "0.52319276", "0.51933086", "0.5182566", "0.51787955", "0.5177845", "0.51512223", "0.5148423", "0.51367587", "0.51330996", "0.5129743", "0.5125498", "0.5121102", "0.5119387", "0.51151234", "0.5103192", "0.5096057", "0.5094332", "0.5089588", "0.50828457", "0.5079025", "0.5074277", "0.5065089", "0.5051747", "0.5046208", "0.5043506", "0.50371295", "0.50350344", "0.50227654", "0.5017293", "0.50140345", "0.49973375", "0.49959415", "0.49891874", "0.49868128", "0.4977773", "0.4973126", "0.4971641", "0.49701044", "0.4970014", "0.49669513", "0.49591425", "0.49573186", "0.4955999", "0.49534026", "0.49501735", "0.49471956", "0.4939945", "0.4934027", "0.49337912", "0.49330652", "0.49250522", "0.4912394", "0.49041754", "0.49016374", "0.49002683", "0.48828706", "0.48816505", "0.48754862", "0.48743537", "0.48732787", "0.48701113", "0.48672125", "0.48662856", "0.48578513" ]
0.7806975
0
Convenience function to normalize the `chunks` argument for an array with the given `shape`.
def normalize_chunks(chunks: Any, shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:
    # N.B., expect shape already normalized

    # handle auto-chunking
    if chunks is None or chunks is True:
        return guess_chunks(shape, typesize)

    # handle no chunking
    if chunks is False:
        return shape

    # handle 1D convenience form
    if isinstance(chunks, numbers.Integral):
        chunks = tuple(int(chunks) for _ in shape)

    # handle bad dimensionality
    if len(chunks) > len(shape):
        raise ValueError("too many dimensions in chunks")

    # handle underspecified chunks
    if len(chunks) < len(shape):
        # assume chunks across remaining dimensions
        chunks += shape[len(chunks) :]

    # handle None or -1 in chunks
    if -1 in chunks or None in chunks:
        chunks = tuple(s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks))

    chunks = tuple(int(c) for c in chunks)
    return chunks
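A hedged usage sketch for the document above. The imports and the stand-in guess_chunks below are assumptions added only so the calls run; they are not part of the record, and the real guess_chunks is expected to be the heuristic helper the function delegates to for auto-chunking. The sketch assumes the record's normalize_chunks is defined in the same module, after these imports.

import numbers
from typing import Any, Tuple

def guess_chunks(shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:
    # placeholder stand-in for the real auto-chunking heuristic, used only for this sketch
    return shape

# illustrative calls against the function shown in the record above
print(normalize_chunks(500, (1000, 2000), 8))        # (500, 500): an int broadcasts to every dimension
print(normalize_chunks((100,), (1000, 2000), 8))     # (100, 2000): missing trailing dims are filled from shape
print(normalize_chunks((100, -1), (1000, 2000), 8))  # (100, 2000): -1 or None means "use the full dimension"
print(normalize_chunks(False, (1000, 2000), 8))      # (1000, 2000): chunking disabled, chunks == shape
print(normalize_chunks(None, (1000, 2000), 8))       # delegates to guess_chunks(shape, typesize)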
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _normalize_shape(shape):\n\n if isinstance(shape, (np.integer, int)):\n if shape < 1:\n raise ValueError(\"shape value must be greater than 0: %d\"\n % shape)\n shape = (shape,) # N is a shorthand for (N,)\n try:\n shape = tuple(shape)\n except TypeError:\n raise TypeError(\"shape must be an integer or sequence: %r\"\n % (shape,))\n\n # XXX Get from HDF5 library if possible.\n # HDF5 does not support ranks greater than 32\n if len(shape) > 32:\n raise ValueError(\n f\"shapes with rank > 32 are not supported: {shape!r}\")\n\n return tuple(SizeType(s) for s in shape)", "def normalize(shape):\n s = shape\n matrix = Shape.get_matrix(s.get_vector())\n norm_x = math.sqrt(sum(matrix[:, 0] ** 2))\n norm_y = math.sqrt(sum(matrix[:, 1] ** 2))\n for pt in s.pts:\n pt.x /= norm_x\n pt.y /= norm_y\n return s", "def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the data across the bsz batches.\n data = data.reshape(batch_size, -1).transpose()\n # if torch.cuda.is_available():\n # data = data.cuda()\n return data", "def normalize_array(array):\n\n return array / np.sum(array, axis=1)[:, np.newaxis]", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def normalize(a, axis=None):\n a_sum = a.sum(axis)\n if axis and a.ndim > 1:\n a_sum[a_sum == 0] = 1\n shape = list(a.shape)\n shape[axis] = 1\n a_sum.shape = shape\n\n return a / a_sum", "def rechunk(array, chunk_size=None, chunk_overlap=None):\n\n # deal with chunk sizes\n ds = array.datashape.copy()\n if chunk_size is None:\n chunk_size = ds.chunk_size\n if isinstance(chunk_size, int):\n chunk_size = [chunk_size] * ds.ndim\n ds.chunk_size = chunk_size\n\n if chunk_overlap is None:\n chunk_overlap = ds.chunk_overlap\n if isinstance(chunk_overlap, int):\n chunk_overlap = [chunk_overlap] * ds.ndim\n ds.chunk_overlap = chunk_overlap\n\n if ds != array.datashape:\n array = array.redimension(ds.schema)\n return array", "def normalize_shape(shape: Union[int, Tuple[int, ...], None]) -> Tuple[int, ...]:\n\n if shape is None:\n raise TypeError(\"shape is None\")\n\n # handle 1D convenience form\n if isinstance(shape, numbers.Integral):\n shape = (int(shape),)\n\n # normalize\n shape = cast(Tuple[int, ...], shape)\n shape = tuple(int(s) for s in shape)\n return shape", "def normalize_chunks(\n chunks: Mapping[str, Union[int, Tuple[int, ...]]],\n dim_sizes: Mapping[str, int],\n) -> Dict[str, int]:\n if not chunks.keys() <= dim_sizes.keys():\n raise ValueError(\n 'all dimensions used in chunks must also have an indicated size: '\n f'chunks={chunks} vs dim_sizes={dim_sizes}')\n result = {}\n for dim, size in dim_sizes.items():\n if dim not in chunks:\n result[dim] = size\n elif isinstance(chunks[dim], tuple):\n unique_chunks = set(chunks[dim])\n if len(unique_chunks) != 1:\n raise ValueError(\n f'chunks for dimension {dim} are not constant: {unique_chunks}',\n )\n result[dim], = unique_chunks\n elif chunks[dim] == -1:\n result[dim] = size\n else:\n result[dim] = chunks[dim]\n return result", "def roi_normalise(roi, shape):\n\n def fill_if_none(x, val_if_none):\n return val_if_none if x is None else x\n\n def norm_slice(s, n):\n start = fill_if_none(s.start, 0)\n stop = fill_if_none(s.stop, n)\n start, stop = [x if x >= 0 else n+x for x in (start, stop)]\n return slice(start, stop, 
s.step)\n\n if not isinstance(shape, collections.abc.Sequence):\n shape = (shape,)\n\n if isinstance(roi, slice):\n return norm_slice(roi, shape[0])\n\n return tuple([norm_slice(s, n) for s, n in zip(roi, shape)])", "def normalised(a: np.ndarray, order: int = None, axis: int = -1) -> np.ndarray:\n norm = np.atleast_1d(np.linalg.norm(a, order, axis))\n return a / np.expand_dims(norm, axis)", "def normalize(arr: np.ndarray) -> np.ndarray:\n if max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))", "def array_rebin(data, shape):\n\n # Ensure dimensions are consistent\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n\n # Get pairs of (shape, bin factor) for each dimension\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n\n # Rebin the array\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data", "def _fit_array_to_image(base_shape, array: np.ndarray) -> np.ndarray:\n shape = list(array.shape)\n for i, el in enumerate(base_shape):\n if el == 1 and el != shape[i]:\n shape.insert(i, 1)\n elif el != shape[i]:\n raise ValueError(f\"Wrong array shape {shape} for {base_shape}\")\n if len(shape) != len(base_shape):\n raise ValueError(f\"Wrong array shape {shape} for {base_shape}\")\n return np.reshape(array, shape)", "def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S", "def normalize(arr):\n\n total = sum(arr)\n\n return list(map(lambda x: x / total, arr))", "def normalize_data(batch_data):\n B, N, C = batch_data.shape\n normal_data = np.zeros((B, N, C))\n for b in range(B):\n pc = batch_data[b]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n pc = pc / m\n normal_data[b] = pc\n return normal_data", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def _normalize_sequence(arr, rank):\n if hasattr(arr, \"__iter__\") and not isinstance(arr, str):\n if isinstance(arr, cupy.ndarray):\n arr = cupy.asnumpy(arr)\n normalized = list(arr)\n if len(normalized) != rank:\n err = \"sequence argument must have length equal to arr rank\"\n raise RuntimeError(err)\n else:\n normalized = [arr] * rank\n return normalized", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def normalize(nparray, order=2, axis=0):\n norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)\n return nparray / (norm + np.finfo(np.float32).eps)", "def __preprocess(data, sample_size: int = 200000):\n mean = data[:sample_size].mean(axis=0)\n data -= mean\n stdev = data[:sample_size].std(axis=0)\n data /= stdev\n return data", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def BatchNormalization(inputs, data_format):\n return tf.layers.BatchNormalization(axis=1 if data_format == 
'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY,\n epsilon=_BATCH_NORM_EPSILON,\n scale=True)(inputs)", "def __normalize_after_fft(arr):\n\n n1, n2 = arr.shape[0], arr.shape[1]\n for i in range(n1):\n for j in range(n2):\n arr[i, j] *= n1 * n2\n\n return arr", "def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))", "def normalize(arr, stats=False):\n arr = np.array(arr)\n mean = arr.mean()\n std = arr.std()\n normed = (arr - mean) / std\n if not stats:\n return normed\n return normed, mean, std", "def normalize(arr, eps):\n\n norm = cuda.reduce('T x', 'T out',\n 'x * x', 'a + b', 'out = sqrt(a)', 0,\n 'norm_sn')(arr)\n cuda.elementwise('T norm, T eps',\n 'T x',\n 'x /= (norm + eps)',\n 'div_sn')(norm, eps, arr)\n return norm", "def normalize(v):\n\tdim = v.shape \n\tfor i in range(0, dim[0]-1):\n\t\tv[i,:,:] = (v[i,:,:].T/np.sum(v[i,:,:],1)).T\n\n\treturn v", "def reshape(x, shape):\n return Reshape(shape)(x)", "def batchify(data, batch_size, args):\n # Work out how cleanly we can divide the dataset into batch_size parts (i.e. continuous seqs).\n nbatch = data.size(0) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * batch_size)\n # Evenly divide the data across the batch_size batches.\n data = data.view(batch_size, -1)\n if args.cuda:\n data = data.cuda()\n return data", "def unchunkify(chunks):\n recreated_chunks = list(map(lambda x: np.fft.irfft(combine_phase_and_power(*x)), chunks))\n total_length = len(recreated_chunks) * CHUNK_SIZE // 2\n output = np.zeros(total_length)\n window = np.power(np.sin(np.linspace(0, np.pi, CHUNK_SIZE)), 2)\n \n for i, j in enumerate(xrange(0, total_length - CHUNK_SIZE, CHUNK_SIZE // 2)):\n o = window * recreated_chunks[i]\n \n output[j: j+CHUNK_SIZE] += o\n return output", "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. 
See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.compat.v1.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "def normalize(array, norm=\"l2\"):\n scaler = Normalizer(copy=True, norm=norm)\n return scaler.fit_transform(array)", "def __initialize_from_shape_and_chunk_size(self, shape: Tuple[int, ...], chunk_size: Tuple[int, ...]):\n collection_shape = tuple(\n shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape)))\n objects = np.empty(collection_shape, dtype=MultiDimensionalSlice)\n num_chunks = [shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape))]\n indexes = [range(num_chunks[i]) for i in range(len(shape))]\n\n for indx in product(*indexes):\n current_slice = tuple(\n slice(chunk_size[i] * indx[i], min(chunk_size[i] * (indx[i] + 1), shape[i])) for i in range(len(shape)))\n objects[indx] = MultiDimensionalSlice(indices=indx, slices=current_slice)\n self.__initialize_from_array(objects_array=objects)", "def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)", "def normalize(array):\n\n # calculate the mean of array\n array_mean = numpy.mean(array)\n if _DEBUG:\n print \"Mean of gr is:\"\n print array_mean\n\n # divide all elements by the mean\n norm_list = []\n for item in array:\n norm_list.append(item/array_mean - 1)\n\n # return the result\n return norm_list", "def _normalize_coordinates(\n target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False\n) -> np.ndarray:\n old_height, old_width = original_size\n\n scale = target_size * 1.0 / max(old_height, old_width)\n new_height, new_width = old_height * scale, old_width * scale\n new_width = int(new_width + 0.5)\n new_height = int(new_height + 0.5)\n\n coords = deepcopy(coords).astype(float)\n\n if is_bounding_box:\n coords = coords.reshape(-1, 2, 2)\n\n coords[..., 0] = coords[..., 0] * (new_width / old_width)\n coords[..., 1] = coords[..., 1] * (new_height / old_height)\n\n if is_bounding_box:\n coords = coords.reshape(-1, 4)\n\n return coords", "def unchanged_shape(input_shape):\n return input_shape", "def _optimizeshape(shape):\n shape.sort()\n if ORDER == 'C':\n shape[:] = shape[::-1]", "def split_array(array: np.ndarray, parts: int):\n\n if parts == -1:\n parts = array.size\n shape = array.shape\n possible_chunk_sizes = []\n # Generate all possible chunk sizes for the given array shape\n for chunk_size in product(*[range(1, shape[i] + 1) for i in range(len(shape))]):\n # Check if the number of chunks generated by the current chunk size is equal to the desired number of parts\n if np.prod(\n [shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape))]) == parts:\n possible_chunk_sizes.append(chunk_size)\n # Sort the possible chunk sizes in ascending order of the sum of the squares of their dimensions\n possible_chunk_sizes.sort(key=lambda x: np.sum(np.array(x) ** 2)) # type: ignore\n if not possible_chunk_sizes:\n logging.warning(\"Could not divide the domain in %d parts. 
Trying with parts=%d.\", parts, parts - 1)\n return split_array(array=array, parts=parts - 1)\n selected_chunk_size = possible_chunk_sizes[0]\n\n chunks = []\n # Get the number of chunks for the first possible chunk size\n num_chunks = [shape[i] // selected_chunk_size[i] + int(shape[i] % selected_chunk_size[i] != 0) for i in\n range(len(shape))]\n indexes = [range(num_chunks[i]) for i in range(len(shape))]\n # Iterate over the chunks and append the corresponding slice of the array to the chunks list\n for indx in product(*indexes):\n current_slice = tuple(\n slice(selected_chunk_size[i] * indx[i], min(selected_chunk_size[i] * (indx[i] + 1), shape[i])) for i in\n range(len(shape)))\n chunks.append(array[current_slice])\n return chunks", "def guess_chunks(shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:\n\n ndims = len(shape)\n # require chunks to have non-zero length for all dimensions\n chunks = np.maximum(np.array(shape, dtype=\"=f8\"), 1)\n\n # Determine the optimal chunk size in bytes using a PyTables expression.\n # This is kept as a float.\n dset_size = np.prod(chunks) * typesize\n target_size = CHUNK_BASE * (2 ** np.log10(dset_size / (1024.0 * 1024)))\n\n if target_size > CHUNK_MAX:\n target_size = CHUNK_MAX\n elif target_size < CHUNK_MIN:\n target_size = CHUNK_MIN\n\n idx = 0\n while True:\n # Repeatedly loop over the axes, dividing them by 2. Stop when:\n # 1a. We're smaller than the target chunk size, OR\n # 1b. We're within 50% of the target chunk size, AND\n # 2. The chunk is smaller than the maximum chunk size\n\n chunk_bytes = np.prod(chunks) * typesize\n\n if (\n chunk_bytes < target_size or abs(chunk_bytes - target_size) / target_size < 0.5\n ) and chunk_bytes < CHUNK_MAX:\n break\n\n if np.prod(chunks) == 1:\n break # Element size larger than CHUNK_MAX\n\n chunks[idx % ndims] = math.ceil(chunks[idx % ndims] / 2.0)\n idx += 1\n\n return tuple(int(x) for x in chunks)", "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. 
See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))", "def normalize(array):\n\treturn array/np.max(array)", "def batch_normalization(x, phase_train, out_size):\r\n with tf.variable_scope('bn'):\r\n beta = tf.Variable(tf.constant(0.0, shape=[out_size]),\r\n name='beta', trainable=True)\r\n gamma = tf.Variable(tf.constant(1.0, shape=[out_size]),\r\n name='gamma', trainable=True)\r\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\r\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\r\n\r\n def mean_var_with_update():\r\n ema_apply_op = ema.apply([batch_mean, batch_var])\r\n with tf.control_dependencies([ema_apply_op]):\r\n return tf.identity(batch_mean), tf.identity(batch_var)\r\n\r\n mean, var = tf.cond(phase_train,\r\n mean_var_with_update,\r\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\r\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\r\n return normed", "def batch_normalization(x, phase_train, out_size):\n\n\twith tf.variable_scope('bn'):\n\t\tbeta = tf.Variable(tf.constant(0.0, shape=[out_size]), name='beta', trainable=True)\n\t\tgamma = tf.Variable(tf.constant(1.0, shape=[out_size]), name='gamma', trainable=True)\n\t\tbatch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n\t\tema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n\t\tdef mean_var_with_update():\n\t\t\tema_apply_op = ema.apply([batch_mean, batch_var])\n\t\t\twith tf.control_dependencies([ema_apply_op]):\n\t\t\t\treturn tf.identity(batch_mean), tf.identity(batch_var)\n\n\t\tmean, var = tf.cond(phase_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))\n\t\tnormed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n\treturn normed", "def contract_to_shape(data, shape, dtype=None):\n if dtype is None:\n dtype = data.dtype\n if shape==data.shape:\n return data.astype(dtype)\n slices = []\n for s1, s2 in zip (data.shape, shape):\n slices.append(slice((s1-s2)//2, (s1+s2)//2))\n return data[tuple(slices)].astype(dtype)", "def iter_slices(shape, chunk_size):\n assert len(shape) == len(chunk_size)\n num_grid_chunks = [int(ceil(s / float(c))) for s, c in zip(shape, chunk_size)]\n for grid_index in numpy.ndindex(*num_grid_chunks):\n yield tuple(\n slice(min(d * c, stop), min((d + 1) * c, stop)) for d, c, stop in zip(grid_index, chunk_size, shape))", "def convert_reshape(g, op, block):\n\n input_shape = op.input(\"Shape\")\n input_shape_tensor = op.input(\"ShapeTensor\")\n data = g.get_node(op.input(\"X\")[0])\n if input_shape:\n new_shape = g.get_node(input_shape[0])\n elif input_shape_tensor:\n new_shape = []\n for shape_name in input_shape_tensor:\n shape = g.get_node(shape_name)\n if len(infer_shape(shape)) == 0:\n shape = _op.reshape(shape, [-1])\n new_shape.append(shape)\n new_shape = _op.concatenate(new_shape, axis=0)\n new_shape, infered = try_infer_value(new_shape, parameters=g.get_params())\n if infered:\n new_shape = new_shape.tolist()\n else:\n new_shape = op.attr(\"shape\")\n out = _op.reshape(data, new_shape)\n g.add_node(op.output(\"Out\")[0], out)", "def _normalize_patches(patches):\n patches = array_ops.concat(patches, 0)\n mean, variance = nn.moments(patches, [1, 2, 3], 
keep_dims=True)\n patches = (patches - mean) / math_ops.sqrt(variance)\n return array_ops.reshape(patches, [array_ops.shape(patches)[0], -1])", "def chunk(self, shape, split) -> NotImplementedError:\n raise NotImplementedError()", "def normalize_and_check_ndim(values, d):\n def normalize(a):\n if isinstance(a, collections.Sequence):\n return np.asarray(a)\n else:\n return a\n values = [normalize(v) for v in values]\n for v in values:\n check_ndim(v, d)\n return values", "def normalize(tensor: np.ndarray):\n if len(tensor.shape) < 4:\n tensor = np.expand_dims(tensor, axis=2)\n mean = np.array([tensor[..., chn, :].mean() for chn in range(tensor.shape[2])])\n std = np.array([tensor[..., chn, :].std() for chn in range(tensor.shape[2])])\n return (tensor - mean[:, np.newaxis]) / std[:, np.newaxis]", "def reshape(x, shape):\n return float(x) if shape is None else jnp.reshape(x, shape)", "def normalize_values(values: ArrayLike, norm: str | float | bool = True) -> np.ndarray:\n values = np.asarray(values)\n assert norm\n\n if isinstance(norm, str):\n if norm == \"first\":\n divisor = values[0]\n elif norm == \"max\":\n divisor = max(values)\n else:\n raise ValueError(f\"Invalid normalization, got {norm=}\")\n else:\n divisor = float(norm)\n\n return values / divisor", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def reduce(arr, factor=1, how=np.mean):\n arr = np.asarray(arr)\n shape = list(arr.shape)\n newshape = shape[:-2] + [np.round(shape[-2] / factor).astype(int), factor,\n np.round(shape[-1] / factor).astype(int), factor]\n return how(how(arr.reshape(*newshape), axis=len(newshape)-3),\n axis=len(newshape)-2)", "def batch_norm(in_tensor, phase_train, name, reuse=None, data_format='NHWC', center=True, scale=True):\n axis = -1 if data_format == 'NHWC' else 1\n with tf.variable_scope(name):\n # return tf.contrib.layers.batch_norm(in_tensor, is_training=phase_train, scope=scope, reuse=reuse)\n return tf.layers.batch_normalization(in_tensor, axis=axis, center=center, scale=scale, training=phase_train,\n reuse=reuse, fused=True, momentum=0.99, epsilon=1e-1)", "def _normalize(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n if \"norm\" in ds:\n norm = ds.norm\n else:\n norm = ds.data.mean(dim=dim)\n norm.attrs[\"_group_apply_reshape\"] = True\n\n return xr.Dataset(\n dict(data=apply_correction(ds.data, invert(norm, kind), kind), norm=norm)\n )", "def _normalize(X: np.ndarray) -> np.ndarray:\n # return X * np.sqrt(1 / np.sum(X ** 2, axis=1))[:, None]\n return X * np.sqrt(X.shape[1] / np.sum(X ** 2, axis=1))[:, None]", "def normalize_axis(x, axis, dtype=float):\n x = x.astype(dtype)\n ind_list = [slice(None) for i in range(x.ndim)]\n try:\n for i in range(x.shape[axis]):\n ind_list[axis] = i\n ind = tuple(ind_list)\n minn = x[ind].min()\n maxx = x[ind].max()\n x[ind] = (x[ind]-minn) / (maxx-minn)\n except IndexError:\n raise np.AxisError(\n \"axis {} is out of bounds for array of dimension {}\".format(\n axis, x.ndim\n )\n )\n return x", "def reshape(ring_buffer, shape):\n try:\n buffer = ring_buffer._data\n except AttributeError:\n buffer = ring_buffer\n\n new_shape = get_shape(shape)\n myshape = get_shape(buffer.shape)\n if new_shape[1] == 0:\n new_shape = (new_shape[0], 1) + new_shape[2:]\n\n if new_shape[0] == -1:\n try: # Only change the column shape\n buffer.shape = new_shape\n except ValueError: # Change the entire array shape\n rows = 
int(np.ceil(myshape[0]/new_shape[1]))\n new_shape = (rows, ) + new_shape[1:]\n buffer.resize(new_shape, refcheck=False)\n\n else:\n # Force proper sizing\n buffer.resize(new_shape, refcheck=False)\n\n # Clear the buffer if it did anything but grow in length\n # if not (new_shape[0] > myshape[0] and new_shape[1:] == myshape[1:]):\n try:\n ring_buffer.clear()\n except AttributeError:\n pass", "def preprocess(img, out_shape=None):\n if out_shape is not None:\n img = resize(img, out_shape, mode='constant')\n\n # Normalize the image\n mean = img.mean()\n std = img.std()\n return (img - mean) / std", "def normalize(my_array: np.ndarray) -> np.ndarray:\n\n return np.abs(my_array)/np.max(np.abs(my_array))", "def construct_data_from_chunks(\n self, chunk_shape, data_shape, dtype, filter_pipeline):\n if isinstance(dtype, tuple):\n true_dtype = tuple(dtype)\n dtype_class = dtype[0]\n if dtype_class == 'REFERENCE':\n size = dtype[1]\n if size != 8:\n raise NotImplementedError('Unsupported Reference type')\n dtype = '<u8'\n else:\n raise NotImplementedError('datatype not implemented')\n else:\n true_dtype = None\n\n # create array to store data\n shape = [_padded_size(i, j) for i, j in zip(data_shape, chunk_shape)]\n data = np.zeros(shape, dtype=dtype)\n\n # loop over chunks reading each into the full data array\n count = np.prod(chunk_shape)\n itemsize = np.dtype(dtype).itemsize\n chunk_buffer_size = count * itemsize\n for node in self.all_nodes[0]:\n for node_key, addr in zip(node['keys'], node['addresses']):\n self.fh.seek(addr)\n if filter_pipeline is None:\n chunk_buffer = self.fh.read(chunk_buffer_size)\n else:\n chunk_buffer = self.fh.read(node_key['chunk_size'])\n filter_mask = node_key['filter_mask']\n chunk_buffer = self._filter_chunk(\n chunk_buffer, filter_mask, filter_pipeline, itemsize)\n\n chunk_data = np.frombuffer(chunk_buffer, dtype=dtype)\n start = node_key['chunk_offset'][:-1]\n region = [slice(i, i+j) for i, j in zip(start, chunk_shape)]\n data[tuple(region)] = chunk_data.reshape(chunk_shape)\n\n if isinstance(true_dtype, tuple):\n if dtype_class == 'REFERENCE':\n to_reference = np.vectorize(Reference)\n data = to_reference(data)\n else:\n raise NotImplementedError('datatype not implemented')\n\n non_padded_region = tuple([slice(i) for i in data_shape])\n return data[non_padded_region]", "def reshape(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = Reshape(shape).apply((x,))\n return y", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def normalize(a):\n a = np.array(a)\n return a / np.linalg.norm(a)", "def normalize_array(arr, method=\"min_max\"):\r\n \r\n ret = torch.tensor(arr)\r\n if method == \"min_max\":\r\n ret -= torch.min(ret)\r\n ret /= torch.max(ret)\r\n elif method == \"mean_std\":\r\n ret -= torch.mean(ret)\r\n ret /= torch.std(ret)\r\n else:\r\n raise Exception(\"Invalid normalization method\")\r\n\r\n return 1 + ret", "def batch_image_preprocess(raw_images,\n image_size: Union[int, Tuple[int, int]],\n mean_rgb,\n stddev_rgb,\n batch_size: int = None):\n if not batch_size:\n # map_fn is a little bit slower due to some extra overhead.\n # map_fn -> vectorized_map (fully parallelizes the batch).\n map_fn = functools.partial(\n image_preprocess,\n image_size=image_size,\n mean_rgb=mean_rgb,\n stddev_rgb=stddev_rgb)\n images, scales = tf.vectorized_map(map_fn, raw_images, warn=False)\n images = tf.stop_gradient(tf.cast(images, tf.float32))\n scales = tf.stop_gradient(tf.cast(scales, 
tf.float32))\n return (images, scales)\n\n # If batch size is known, use a simple loop.\n scales, images = [], []\n for i in range(batch_size):\n image, scale = image_preprocess(raw_images[i], image_size, mean_rgb,\n stddev_rgb)\n scales.append(scale)\n images.append(image)\n images = tf.stack(images)\n scales = tf.stack(scales)\n return (images, scales)", "def batchify(data, batch_size):\n n_batch = data.shape[0] // batch_size\n data = data[:n_batch * batch_size]\n data = data.reshape((batch_size, n_batch)).T\n return data", "def normalize(self,arr):\n arr = arr/(arr.max()/255.0)\n return arr", "def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr", "def standardize_single_array(x):\n if x is None:\n return None\n if tensor_util.is_tensor(x):\n x_shape_ndims = array_ops.rank(x)\n else:\n x_shape_ndims = len(x.shape)\n\n if (x_shape_ndims == 1 and (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tensor(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x", "def normalize(array: np.ndarray, value: float | None = None) -> np.ndarray:\n if value is None:\n val = array.max()\n else:\n val = value\n array = array / val\n return array", "def split(self, array_names: Iterable[str]) -> \"FlattenedStorage\":\n for k in array_names:\n if k not in self._per_element_arrays and k not in self._per_chunk_arrays:\n raise ValueError(f\"Array name {k} not present in FlattenedStorage!\")\n\n split = copy.copy(self)\n for k in list(split._per_element_arrays):\n if k not in array_names:\n del split._per_element_arrays[k]\n else:\n split._per_element_arrays[k] = np.copy(split._per_element_arrays[k])\n for k in list(split._per_chunk_arrays):\n if k not in array_names and k not in (\n \"start_index\",\n \"length\",\n \"identifier\",\n ):\n del split._per_chunk_arrays[k]\n else:\n split._per_chunk_arrays[k] = np.copy(split._per_chunk_arrays[k])\n return split", "def normalizeRows(x):\n N = x.shape[0]\n x /= np.sqrt(np.sum(x ** 2, axis=1)).reshape((N, 1)) + 1e-30\n return x", "def standardize_single_array(x, expected_shape=None):\n if x is None:\n return None\n\n if is_composite_or_composite_value(x):\n return x\n\n if isinstance(x, int):\n raise ValueError(\n 'Expected an array data type but received an integer: {}'.format(x))\n\n if (x.shape is not None and len(x.shape) == 1 and\n (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tf_type(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x", "def normalize(sequence):\n return [_norm(s) for s in sequence]", "def normalize(inp):\n\n out = inp / np.linalg.norm(inp, axis=1, keepdims=True)\n\n return out", "def iterate_array_in_chunks(arr, size: int):\n for i in range(0, len(arr), size):\n yield arr[i:i+size]", "def normalization(channels):\n return GroupNorm32(32, channels)", "def reshape(self, *shape):\n return F.Reshape.apply(self, shape)", "def color_normalize(x, mean, std):\n if x.dim() in {3, 4}:\n if x.size(0) == 1:\n x = x.repeat(3, 1, 1)\n assert x.size(0) == 3, \"For single video format, expected RGB along first dim\"\n for t, m, s in zip(x, mean, std):\n t.sub_(m)\n t.div_(s)\n elif x.dim() == 5:\n assert (\n x.shape[1] == 3\n ), \"For batched video format, expected RGB along second dim\"\n x[:, 0].sub_(mean[0]).div_(std[0])\n x[:, 
1].sub_(mean[1]).div_(std[1])\n x[:, 2].sub_(mean[2]).div_(std[2])\n return x", "def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalized(a, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n l2[l2==0] = 1\n return a / np.expand_dims(l2, axis)", "def unravel(arrayin, shape = 0):\r\n N = arrayin.shape[0]\r\n n = int(np.sqrt(N/2))\r\n if type(shape) != tuple :\r\n arrayout = arrayin[:N/2].reshape(n,n) + 1.0J * arrayin[N/2:].reshape(n,n)\r\n else :\r\n arrayout = arrayin[:N/2].reshape(shape) + 1.0J * arrayin[N/2:].reshape(shape)\r\n return arrayout", "def normalize_data(data):\n if data.element_spec[0].shape[2] == 1:\n data = data.map(lambda x, y: (tf.image.grayscale_to_rgb(\n tf.image.resize(x, [32, 32])), y))\n else:\n data = data.map(lambda x, y: (tf.image.resize(x, [32, 32]), y))\n normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)\n normalized_ds = data.map(lambda x, y: (normalization_layer(x), y))\n return normalized_ds", "def __normalize(input, type, a, b):\n return cv2.normalize(input, None, a, b, type)", "def normalize(data: np.ndarray, axis=0, order=2) -> tuple[np.ndarray, np.ndarray]:\n norms = np.linalg.norm(data, ord=order, axis=axis, keepdims=True)\n return (\n data / norms,\n norms,\n )", "def normalize_features(array):\n \n array_normalized = (array-array.mean())/array.std()\n mu = array.mean()\n sigma = array.std()\n\n return array_normalized, mu, sigma", "def blockshaped(arr, nrows, ncols):\r\n\t h, w = arr.shape\r\n\t return (arr.reshape(h//nrows, nrows, -1, ncols)\r\n\t .swapaxes(1,2)\r\n\t .reshape(-1, nrows, ncols))", "def normalize(input_tensor, output_tensor):\n image_dims = utils.get_img_shape(input_tensor)[1:]\n return output_tensor / np.prod(image_dims)", "async def infer_shape_reshape(track, v, shape):\n shp = await shape['value']\n if shp == ANYTHING:\n shp_t = await shape['type']\n shp = (ANYTHING,) * len(shp_t.elements)\n v_shp = await v['shape']\n if (all(s is not ANYTHING for s in shp) and\n all(s is not ANYTHING for s in v_shp) and\n prod(shp) != prod(v_shp)):\n raise MyiaShapeError(\"Cannot change the total number of elements \"\n \"in reshape\")\n return shp", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def np_normalize_image_channels(img_array, per_channel_mean=None, per_channel_stddev=None, clamp_to_range=False, inplace=False):\n # type: (np.ndarray, np.ndarray, np.ndarray, bool, bool) -> np.ndarray\n if inplace:\n normalized_img_array = img_array.astype(np.float32)\n else:\n normalized_img_array = copy.deepcopy(img_array).astype(np.float32)\n\n if np.min(normalized_img_array) < 0 or np.max(normalized_img_array) > 255:\n raise ValueError('Image values are not in range [0, 255], got [{}, {}]'.format(np.min(normalized_img_array), np.max(normalized_img_array)))\n\n normalized_img_array = ((normalized_img_array/255.0) - 0.5) * 2.0\n\n # Subtract the per-channel-mean from the batch to \"center\" the data.\n if per_channel_mean is not None:\n _per_channel_mean = np.array(per_channel_mean).astype(np.float32)\n\n # Per channel mean is in range [-1,1]\n if (_per_channel_mean >= -1.0 - 1e-7).all() and (_per_channel_mean <= 1.0 + 1e-7).all():\n 
normalized_img_array -= _per_channel_mean\n # Per channel mean is in range [0, 255]\n elif (_per_channel_mean >= 0.0).all() and (_per_channel_mean <= 255.0).all():\n normalized_img_array -= np_from_255_to_normalized(_per_channel_mean)\n else:\n raise ValueError('Per channel mean is in unknown range: {}'.format(_per_channel_mean))\n\n # Additionally, you ideally would like to divide by the sttdev of\n # that feature or pixel as well if you want to normalize each feature\n # value to a z-score.\n if per_channel_stddev is not None:\n _per_channel_stddev = np.array(per_channel_stddev).astype(np.float32)\n\n # Per channel stddev is in range [-1, 1]\n if (_per_channel_stddev >= -1.0 - 1e-7).all() and (_per_channel_stddev <= 1.0 + 1e-7).all():\n normalized_img_array /= _per_channel_stddev\n # Per channel stddev is in range [0, 255]\n elif (_per_channel_stddev >= 0.0).all() and (_per_channel_stddev <= 255.0).all():\n normalized_img_array /= np_from_255_to_normalized(_per_channel_stddev)\n else:\n raise ValueError('Per-channel stddev is in unknown range: {}'.format(_per_channel_stddev))\n\n if clamp_to_range:\n min_val = np.min(normalized_img_array)\n max_val = np.max(normalized_img_array)\n\n if min_val < -1.0 or max_val > 1.0:\n print 'WARNING: Values outside of range [-1.0, 1.0] were found after normalization - clipping: [{}, {}]'.format(min_val, max_val)\n normalized_img_array = np.clip(normalized_img_array, -1.0, 1.0, out=normalized_img_array)\n\n # Sanity check for the image values, we shouldn't have any NaN or inf values\n if np.any(np.isnan(normalized_img_array)):\n raise ValueError('NaN values found in image after normalization')\n\n if np.any(np.isinf(normalized_img_array)):\n raise ValueError('Inf values found in image after normalization')\n\n return normalized_img_array", "def normalize_data(data, n=1):\n\n if isinstance(data, str):\n # TODO: could Antti comment on this?\n # numpy array initialization works unintuitively with strings\n data = np.array([[data]], dtype=object)\n else:\n data = np.atleast_1d(data)\n\n if data.ndim == 1:\n if data.shape[0] == n:\n data = data[:, None]\n else:\n data = data[None, :]\n if n > 1:\n data = np.vstack((data, ) * n)\n else:\n if data.shape[0] != n:\n data = data[None, :]\n if n > 1:\n data = np.vstack((data, ) * n)\n return data", "def _partial_flatten_and_normalize(x):\n x = np.reshape(x, (x.shape[0], -1))\n return (x - np.mean(x)) / np.std(x)" ]
[ "0.61362606", "0.60389656", "0.58118457", "0.5734301", "0.5696832", "0.5670947", "0.5670829", "0.5670006", "0.56443065", "0.5637183", "0.56079644", "0.55675006", "0.5559034", "0.55089456", "0.5506106", "0.5494193", "0.54827696", "0.5472908", "0.54428035", "0.54304826", "0.54268885", "0.53581226", "0.535042", "0.52944744", "0.5291196", "0.5289186", "0.5285938", "0.52779835", "0.5254062", "0.5238026", "0.5234786", "0.52157825", "0.52098185", "0.5202345", "0.5191524", "0.5183862", "0.5181672", "0.518144", "0.5180578", "0.51614225", "0.51542825", "0.5149061", "0.514514", "0.5132892", "0.510015", "0.5099883", "0.5091622", "0.5085645", "0.5080604", "0.5064447", "0.5052229", "0.5051817", "0.50517005", "0.50508", "0.50471544", "0.5039188", "0.5038498", "0.5015885", "0.5007682", "0.5006808", "0.49864462", "0.49797273", "0.49776107", "0.49759564", "0.49699962", "0.49611014", "0.49593386", "0.49586597", "0.4950634", "0.49368826", "0.49259466", "0.49031237", "0.49028435", "0.48984852", "0.48913425", "0.48878926", "0.48851845", "0.4879296", "0.48747453", "0.4870547", "0.48644012", "0.48619205", "0.48568463", "0.4845729", "0.4839411", "0.4838374", "0.4836196", "0.48350173", "0.48345584", "0.48313212", "0.48312688", "0.4830904", "0.48196062", "0.48178163", "0.4816597", "0.4805261", "0.48022056", "0.48001403", "0.47989812", "0.47982064" ]
0.7882431
0
Determine whether `item` specifies a complete slice of an array with the given `shape`. Used to optimize `__setitem__` operations on the Chunk class.
def is_total_slice(item, shape: Tuple[int]) -> bool: # N.B., assume shape is normalized if item == Ellipsis: return True if item == slice(None): return True if isinstance(item, slice): item = (item,) if isinstance(item, tuple): return all( ( isinstance(it, slice) and ((it == slice(None)) or ((it.stop - it.start == sh) and (it.step in [1, None]))) ) for it, sh in zip(item, shape) ) else: raise TypeError("expected slice or tuple of slices, found %r" % item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, item):\n if len(item) != len(self.sizes):\n raise ValueError('Point dimension does not match grid dimension')\n for i in range(len(self.sizes)):\n if not 1 <= item[i] < self.sizes[i] - 1:\n return False\n return True", "def roi_is_full(roi, shape):\n def slice_full(s, n):\n return s.start in (0, None) and s.stop in (n, None)\n\n if isinstance(roi, slice):\n roi = (roi,)\n shape = (shape,)\n\n return all(slice_full(s, n) for s, n in zip(roi, shape))", "def __contains__(self, item):\n try:\n pos = Vec2(*item)\n return pos.x >= self.origin.x and pos.y >= self.origin.y \\\n and pos.x < self.origin.x + self.size.x \\\n and pos.y < self.origin.y + self.size.y\n except TypeError:\n return False", "def __getitem__(self, item):\n # type (Any) -> Any\n # Workaround for Arrow bug that segfaults on empty slice.\n # This is fixed in Arrow master, will be released in 0.10\n if isinstance(item, slice):\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n step = item.step if item.step is not None else 1\n # Arrow can't handle slices with steps other than 1\n # https://issues.apache.org/jira/browse/ARROW-2714\n if step != 1:\n arr = np.asarray(self)[item]\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if pa.types.is_integer(self.dtype.arrow_dtype) or pa.types.is_floating(\n self.dtype.arrow_dtype\n ):\n mask = pd.isna(arr)\n else:\n mask = None\n return type(self)(pa.array(arr, type=self.dtype.arrow_dtype, mask=mask))\n if stop - start == 0:\n return type(self)(pa.array([], type=self.data.type))\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean arrays are valid indices.\"\n )\n elif is_integer(item):\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n value = self.data[item]\n if isinstance(value, pa.ChunkedArray):\n return type(self)(value)\n else:\n return value.as_py()", "def is_satisfied(self, item: Product) -> bool:\n return item.size == self.size", "def __getitem__(self, item: SliceLike):\n\n if item == Ellipsis:\n return JaggedArray(data=self.data[...], shape=self.shape[...])\n elif isinstance(item, slice):\n # slow but works\n return self.__class__.from_aoa(self.to_aoa()[item])\n else:\n return self.data[slice(*self._cumsum[item : item + 2])].reshape(\n self.shape[:, item]\n )", "def __getitem__(self, item):\n if isinstance(item, slice):\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n if stop - start == 0:\n return type(self)(xnd.xnd([], type=self.data.type))\n\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean \\\n arrays are valid indices.\"\n )\n\n elif is_integer(item):\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n else:\n\n return self.data[item]\n\n value = self.data[item]\n return type(self)(value)", "def __contains__(self, item: 'BoundingBox2D') -> bool:\n 
top_left_inside = item.xmin >= self.xmin and item.ymin >= self.ymin\n bottom_right_inside = item.xmax <= self.xmax and item.ymax <= self.ymax\n return top_left_inside and bottom_right_inside", "def _is_dim_removed_by_splitting(cls, graph: NNCFGraph, node: NNCFNode) -> Optional[int]:\n split_axis = None\n if isinstance(node.layer_attributes, GetItemLayerAttributes):\n input_edge = graph.get_input_edges(node)[0]\n input_shape = input_edge.tensor_shape\n parent_node = input_edge.from_node\n child_nodes = graph.get_next_nodes(parent_node)\n child_attributes = [cnode.layer_attributes for cnode in child_nodes]\n all_getitem = all(isinstance(ca, GetItemLayerAttributes) for ca in child_attributes)\n assert all_getitem, \"currently supported only case with all __getitem__ on branches\"\n all_int_keys = all(isinstance(ca.key, int) for ca in child_attributes)\n # currently supported only case __getitem__ with single int, no slices\n if not all_int_keys:\n return None\n all_keys = set(ca.key for ca in child_attributes)\n split_dim = input_shape[0]\n if all_keys == set(range(split_dim)):\n split_axis = 0\n return split_axis", "def verify_structure(memlen, itemsize, ndim, shape, strides, offset):\n if offset % itemsize:\n return False\n if offset < 0 or offset + itemsize > memlen:\n return False\n if any(v % itemsize for v in strides):\n return False\n if ndim <= 0:\n return ndim == 0 and not shape and not strides\n if 0 in shape:\n return True\n imin = sum(strides[j] * (shape[j] - 1) for j in range(ndim) if strides[\n j] <= 0)\n imax = sum(strides[j] * (shape[j] - 1) for j in range(ndim) if strides[\n j] > 0)\n return 0 <= offset + imin and offset + imax + itemsize <= memlen", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def verify_structure(memlen, itemsize, ndim, shape, strides, offset):\n if offset % itemsize:\n return False\n if offset < 0 or offset+itemsize > memlen:\n return False\n if any(v % itemsize for v in strides):\n return False\n\n if ndim <= 0:\n return ndim == 0 and not shape and not strides\n if 0 in shape:\n return True\n\n imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)\n if strides[j] <= 0)\n imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)\n if strides[j] > 0)\n\n return 0 <= offset+imin and offset+imax+itemsize <= memlen", "def IsItemVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def is_slice(self) -> bool:\n return self._is_slice", "def can_grow(self, item):\n raise NotImplementedError('Child class must implement can_grow')", "def check_and_image_shape(item: ValueType, shape: List) -> List:\n if len(item.shape) > 0:\n item = str(item[0])\n if item.endswith(('.jpg', '.jpeg', '.png')):\n import cv2\n im = cv2.imread(item)\n if im is not None:\n return list(im.shape)\n return shape", "def has_shape(node):\n allowed_shapes = (\n pm.nt.Mesh,\n pm.nt.NurbsCurve,\n pm.nt.NurbsSurface\n )\n\n has_it = False\n\n children = node.getChildren()\n while 
len(children) and not has_it:\n child = children.pop(0)\n if isinstance(child, allowed_shapes):\n has_it = True\n break\n children += child.getChildren()\n\n return has_it", "def IsVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def __getitem__(self, item: slice | tuple):\n if isinstance(item, slice):\n start, stop = item.start, item.stop\n if start is None:\n start = 0\n if stop is None:\n stop = maxsize\n if start > stop:\n raise IndexError(\"make sure start <= stop\")\n return self.query(Orthotope([Interval(start, stop)]))\n elif isinstance(item, tuple):\n pass\n else:\n raise TypeError(f\"unrecognized index {item}\")", "def is_sequence(item):\n return (not hasattr(item, \"strip\") and\n (hasattr(item, \"__getitem__\") or hasattr(item, \"__iter__\")))", "def owns_shape(self, pm_shape):\n all_shapes = [part.pm_visible_shape for part in self.parts]\n if pm_shape in all_shapes:\n return True\n return False", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> \"PolygonMasks\":\n if isinstance(item, int):\n selected_polygons = [self.polygons[item]]\n elif isinstance(item, slice):\n selected_polygons = self.polygons[item]\n elif isinstance(item, list):\n selected_polygons = [self.polygons[i] for i in item]\n elif isinstance(item, torch.Tensor):\n # Polygons is a list, so we have to move the indices back to CPU.\n if item.dtype == torch.bool:\n assert item.dim() == 1, item.shape\n item = item.nonzero().squeeze(1).cpu().numpy().tolist()\n elif item.dtype in [torch.int32, torch.int64]:\n item = item.cpu().numpy().tolist()\n else:\n raise ValueError(\"Unsupported tensor dtype={} for indexing!\".format(item.dtype))\n selected_polygons = [self.polygons[i] for i in item]\n return PolygonMasks(selected_polygons)", "def is_shape_dynamic(shape: trt.Dims) -> bool:\n return any([is_dimension_dynamic(dim) for dim in shape])", "def __getitem__(\n self, item: Union[int, slice, torch.BoolTensor]\n ) -> \"DensePoseChartPredictorOutput\":\n if isinstance(item, int):\n return DensePoseChartPredictorOutput(\n coarse_segm=self.coarse_segm[item].unsqueeze(0),\n fine_segm=self.fine_segm[item].unsqueeze(0),\n u=self.u[item].unsqueeze(0),\n v=self.v[item].unsqueeze(0),\n )\n else:\n return DensePoseChartPredictorOutput(\n coarse_segm=self.coarse_segm[item],\n fine_segm=self.fine_segm[item],\n u=self.u[item],\n v=self.v[item],\n )", "def _apply_item(self, item: Item) -> bool:\n return False", "def _is_extended_slice(s):\n\n return s.step is not None and s.step != 1", "def isItem(self):\n return _libsbml.Unit_isItem(self)", "def arrayContains(arr, item):\n\tcontains = True\n\ttry:\n\t\tarr.index(item)\n\texcept ValueError:\n\t\tcontains = False\n\treturn contains", "def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 
'image' in item)", "def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index", "def is_full_axis_mask(index, axis_length):\n if isinstance(index, slice):\n return index == slice(None) or (\n isinstance(axis_length, int)\n and compute_sliced_len(index, axis_length) == axis_length\n )\n return (\n hasattr(index, \"__len__\")\n and isinstance(axis_length, int)\n and len(index) == axis_length\n )", "def __contains__(self, item):\n if not item.liidx in self.reg:\n return False\n for l, r in self.reg[item.liidx]: # pragma: no branch\n if l <= item.clidx <= r:\n return True\n elif l > item.clidx: # pragma: no cover\n return False", "def process_slice(slc, shape, n):\n if not isinstance(slc, tuple):\n slc = (slc,)\n slc = list(slc)\n ndim = len(shape) - n\n assert ndim >= 0\n shape_idx = 0\n for slice_idx, s in enumerate(slc):\n if s == nax:\n continue\n if shape[shape_idx] == 1:\n if type(s) == int:\n slc[slice_idx] = 0\n else:\n slc[slice_idx] = slice(None)\n shape_idx += 1\n if shape_idx != ndim:\n raise IndexError('Must have %d terms in the slice object' % ndim)\n return extend_slice(tuple(slc), n)", "def __getitem__(self, item):\n if isinstance(item, slice):\n item = replace_slice_defaults(item)\n\n self.update_rows(item)\n\n return Index(self.expr,\n self.dtype)\n elif isinstance(item, LazyResult):\n if str(item.weld_type) != str(numpy_to_weld_type('bool')):\n raise ValueError('expected LazyResult of bool to filter Index elements')\n\n return Index(weld_filter(self.expr,\n item.expr),\n self.dtype)\n else:\n raise TypeError('expected slice or LazyResult of bool in Index.__getitem__')", "def has_insert(self, shape):\n for insert in self.inserts:\n if insert.shape == shape:\n return True\n return False", "def __contains__(self, item: Any) -> bool:\n if self.is_empty():\n return False\n elif self._first == item:\n return True\n else:\n return self._rest.__contains__(item)\n # Equivalently, item in self._rest", "def tile_is_out_of_borders(index, shape):\n return index[0] < 0 or index[1] < 0 or index[0] >= shape[0] or index[1] >= shape[1]", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", "def __contains__(self, item):\n if self.is_empty():\n return False\n elif self._first == item:\n return True\n else:\n return self._rest.__contains__(item)\n # Equivalently, item in self._rest", "def HasChildren(self, item):\r\n\r\n return len(item.GetChildren()) > 0", "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def __getitem__(self, item):\n # type (Any) -> Any\n value = self.data[item]\n if isinstance(value, pa.ChunkedArray):\n return type(self)(value)\n else:\n return value", "def __getitem__(self, item):\n if isinstance(item, slice):\n if not (item.start or item.stop or item.step):\n return self.values()\n raise NotImplementedError(\"Implement if needed, e.g. for [:]\")\n else:\n try:\n # fast path, a good amount of accesses will want to fetch a specific dataset it knows exists in the\n # file, there's therefor no point in checking whether item is a group or a node or even worse recursing\n # in case when item contains '/'. In most cases read_hdf5 will grab the correct data straight away and\n # if not we will still check thoroughly below. 
Since list_nodes()/list_groups() each open the\n # underlying file once, this reduces the number of file opens in the most-likely case from 2 to 1 (1 to\n # check whether the data is there and 1 to read it) and increases in the worst case from 1 to 2 (1 to\n # try to read it here and one more time to verify it's not a group below).\n return read_hdf5(self.file_name, title=self._get_h5_path(item))\n except (ValueError, OSError, RuntimeError, NotImplementedError):\n # h5io couldn't find a dataset with name item, but there still might be a group with that name, which we\n # check in the rest of the method\n pass\n\n item_lst = item.split(\"/\")\n if len(item_lst) == 1 and item_lst[0] != \"..\":\n # if item in self.list_nodes() we would have caught it in the fast path above\n if item in self.list_groups():\n with self.open(item) as hdf_item:\n obj = hdf_item.copy()\n if self._is_convertable_dtype_object_array(obj):\n obj = self._convert_dtype_obj_array(obj)\n return obj\n raise ValueError(\n \"Unknown item: {} {} {}\".format(item, self.file_name, self.h5_path)\n )\n else:\n if (\n item_lst[0] == \"\"\n ): # item starting with '/', thus we have an absoute HDF5 path\n item_abs_lst = os.path.normpath(item).replace(\"\\\\\", \"/\").split(\"/\")\n else: # relative HDF5 path\n # The self.h5_path is an absolute path (/h5_path/in/h5/file), however, to\n # reach any directory super to root, we start with a\n # relative path = ./h5_path/in/h5/file and add whatever we get as item.\n # The normpath finally returns a path to the item which is relative to the hdf-root.\n item_abs_lst = (\n os.path.normpath(os.path.join(\".\" + self.h5_path, item))\n .replace(\"\\\\\", \"/\")\n .split(\"/\")\n )\n # print('h5_path=', self.h5_path, 'item=', item, 'item_abs_lst=', item_abs_lst)\n if item_abs_lst[0] == \".\" and len(item_abs_lst) == 1:\n # Here, we are asked to return the root of the HDF5-file. The resulting self.path would be the\n # same as the self.file_path and, thus, the path of the pyiron Project this HDF5-file belongs to:\n return self.create_project_from_hdf5()\n elif item_abs_lst[0] == \"..\":\n # Here, we are asked to return a path super to the root of the HDF5-file, a.k.a. 
the path of it's\n # pyiron Project, thus we pass the relative path to the pyiron Project to handle it:\n return self.create_project_from_hdf5()[\"/\".join(item_abs_lst)]\n else:\n hdf_object = self.copy()\n hdf_object.h5_path = \"/\".join(item_abs_lst[:-1])\n return hdf_object[item_abs_lst[-1]]", "def is_container(item):\n if isinstance(item, str):\n return False\n elif hasattr(item, \"__iter__\"):\n return True\n\n return False", "def is_allergic_to(self, item):\n if item in self.list:\n return True\n else:\n return False", "def __contains__(self, item):\n index = bisect_left(self.sequence, item)\n if (len(self.sequence) != index) and (self.sequence[index] == item):\n return True\n return False", "def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False", "def isValidPcbShape(g):\n return g.GetShape() != pcbnew.S_SEGMENT or g.GetLength() > 0", "def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True", "def contains(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n if not self.arr[val]:\n return False\n else:\n return True", "def chunk(self, shape, split) -> NotImplementedError:\n raise NotImplementedError()", "def is_satisfied(self, item: Product) -> bool:\n return item.colour == self.colour", "def test_slice_zero_length_dimension(self):\n for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, shape)\n out = dset[:]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, shape)\n if len(shape) > 1:\n out = dset[:, :1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape[:2], (0, 1))", "def _array_name_implies_ND_slice(self, array_name):\n for v in self._split_arrays.values():\n if array_name in v:\n return True\n\n generic_match = re.findall(\"^(.+)_[xyz]$\", array_name)\n loadable_keys = self.loadable_keys()\n keys = list(self.keys())\n if len(generic_match) == 1 and generic_match[0] not in self._split_arrays:\n return generic_match[0] in loadable_keys or generic_match[0] in keys\n return False", "def _is_ragged_in_1st_dim_only(value: Union[np.ndarray, list]) -> bool:\n if isinstance(value, np.ndarray) and value.dtype != np.dtype(\"O\"):\n return False\n else:\n\n def extract_dims(v):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n s = np.shape(v)\n return s[0], s[1:]\n\n dim1, dim_other = zip(*map(extract_dims, value))\n return len(set(dim1)) > 1 and len(set(dim_other)) == 1", "def __contains__(self, item):\n\n if self.is_view:\n return item in self._view\n return item in self._storage", "def check_bounds (position, size):\n \n for item in position:\n # checks whether item is out of bounds\n if item < 0 or item >= size:\n return False\n return True", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def IsExpanded(self, item):\r\n\r\n return item.IsExpanded()", "def check(self,item):\r\n raise AbstractError\r\n return False", "def isNotContainedWithin(self,shape):\n return not self.isContainedWithin(shape)", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def is_contiguous(arr):\n mn, mx = min(arr), max(arr)\n s = sum(arr)\n sn = (mn*(mn-1))/2 if mn!=0 else 0\n 
sx = (mx*(mx+1))/2\n if s == sx-sn:\n return True\n else:\n return False", "def __contains__(self, item: OidValue) -> bool:\n item = to_int_tuple(item)\n return self.value == item[0 : len(self.value)]", "def is_satisfied(self, item: Any) -> bool:", "def __contains__(self, item):\n # return item in self._items\n # leverage improved performance index() function\n try:\n self.index(item)\n return True\n except ValueError:\n return False", "def _is_all_input_shape_generalize(input_shape_tuple):\n for elem in input_shape_tuple:\n if not is_shape_unknown(elem.shape):\n return False\n return True", "def __contains__(self, item: object) -> bool:\n return item in self._used", "def is_valid(i, j, shape):\n return i >= 0 and j >= 0 and i < shape[0] and j < shape[1]", "def __contains__(self, item: object) -> bool:\n val = conv_kv(item) # type: ignore\n for fixup in self._mapping._fixup.values():\n if fixup.value == val:\n return True\n return False", "def isItemVisible(self, itemName, touchType=True, contentType=None, index=1, area=None, refresh=True, containerObject=None, relatedAreaEnd=None):\r\n\r\n item=0\r\n result = None\r\n\r\n #if index==None:\r\n # area = (0,0,self.getScreenWidth(),self.getScreenHeight())\r\n\r\n item=self.searchItem(itemName, touchType, contentType, index=index, area=area, refresh=refresh, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n\r\n if item:\r\n if index is not None:\r\n x,y,w,h = [int(p) for p in item.getAttribute('coords').split(\",\")]\r\n\r\n x_center = x+(w/2)\r\n y_center = y+(h/2)\r\n\r\n topLeft = (x,y)\r\n topRight = (x+w,y)\r\n bottomLeft = (x,y+h)\r\n bottomRight = (x+w,y+h)\r\n top = int(item.getAttribute('top'))\r\n bottom = int(item.getAttribute('bottom'))\r\n left = int(item.getAttribute('left'))\r\n right = int(item.getAttribute('right'))\r\n\r\n if (item.getAttribute('visible') != 'hidden') and (self.__isPointOnScreen(topLeft) or self.__isPointOnScreen(topRight) or self.__isPointOnScreen(bottomLeft) or self.__isPointOnScreen(bottomRight) or (top < 0 and bottom > self.getScreenHeight()) or (left < 0 and right > self.getScreenWidth()) ):\r\n result = (self.VISIBLE,(x_center,y_center), item)\r\n else:\r\n result = (self.HIDDEN,(x_center,y_center), item)\r\n else:\r\n resultList = []\r\n for i in item:\r\n x,y,w,h = [int(p) for p in i.getAttribute('coords').split(\",\")]\r\n\r\n x_center = x+(w/2)\r\n y_center = y+(h/2)\r\n\r\n topLeft = (x,y)\r\n topRight = (x+w,y)\r\n bottomLeft = (x,y+h)\r\n bottomRight = (x+w,y+h)\r\n\r\n top = int(i.getAttribute('top'))\r\n bottom = int(i.getAttribute('bottom'))\r\n left = int(i.getAttribute('left'))\r\n right = int(i.getAttribute('right'))\r\n\r\n if (i.getAttribute('visible') != 'hidden') and (self.__isPointOnScreen(topLeft) or self.__isPointOnScreen(topRight) or self.__isPointOnScreen(bottomLeft) or self.__isPointOnScreen(bottomRight) or (top < 0 and bottom > self.getScreenHeight()) or (left < 0 and right > self.getScreenWidth()) ):\r\n return (self.VISIBLE,(x_center,y_center), i)\r\n else:\r\n resultList.append((self.HIDDEN,(x_center,y_center), i))\r\n result = resultList[0]\r\n else:\r\n result = (self.NOT_FOUND,(0,0), item)\r\n\r\n return result", "def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked", "def __getitem__(self, pos: int) -> bool:", "def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False", "def Item(self) -> bool:", "def 
test_slice_of_length_zero(self):\n for i, shape in enumerate([(3,), (2, 2,), (2, 1, 5)]):\n dset = self.f.create_dataset('x%d'%i, data=np.zeros(shape, int), maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[1:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (0,)+shape[1:])", "def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))", "def _validate_item(self, item):\n try:\n self._validate_item_required_attrs(item=item)\n self._validate_item_link(item=item)\n except ValueError as ex:\n logger.info(str(ex))\n return False\n\n return True", "def __contains__(self, item):\n return item in self.default_dataset", "def has_datapoint_with_dim(fake_ingest, key, value):\n return has_datapoint_with_all_dims(fake_ingest, {key: value})", "def IsItem3State(self, item):\r\n\r\n return item.Is3State()", "def _check_item(proc):\n if not isinstance(proc, ReadingProc):\n raise WrongReadingSetItem( 'You try to interract with item(s) of wrong type(s), '\n 'e.g. not describing processes.')", "def one_head_test(self, item):\n v = [i for i, j in self.A if j == item]\n return len(v) == 0", "def _maybe_to_slice(loc):\n if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:\n return loc\n\n loc = lib.maybe_indices_to_slice(loc, len(self))\n if isinstance(loc, slice):\n return loc\n\n mask = np.empty(len(self), dtype=\"bool\")\n mask.fill(False)\n mask[loc] = True\n return mask", "def _supports(self, item):\n return type(item) in Result.SUPPORTED_DATA", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def check_has_dims(hdr):\n try:\n return (hdr['startX'], hdr['startY'])\n except KeyError:\n return False", "def XCAFDoc_ShapeTool_IsShape(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsShape(*args)", "def check_shape(self):\r\n if np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape", "def __contains__(self, item: Any) -> bool:\n try:\n self.__getattr__(item)\n return True\n except RuntimeError:\n return False", "def __getitem__(self, item_slice):\n assert len(item_slice) == 4\n if K.image_dim_ordering() == 'th':\n return item_slice\n else:\n return tuple([item_slice[0], item_slice[2], item_slice[3], item_slice[1]])", "def __contains__(self, item):\n if item in self._parents:\n return True\n else:\n return False", "def in_bounds(state, map_shape):\n return 0 <= state[0] < map_shape[0] and 0 <= state[1] < map_shape[1]", "def is_full(self):\n return self.idx == self.len", "def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg", "def is_subdivision_available(self, position: np.ndarray) -> bool:\n\t\tsubdivision_x_index = int(position[0]) // self.square_subdivision_length\n\t\tsubdivision_y_index = int(position[1]) // self.square_subdivision_length\n\t\treturn self.plane_subdivisions_availability[subdivision_x_index, subdivision_y_index] == 1", "def __getitem__(self, item):\n if isinstance(item, slice):\n return list(itertools.islice(self._queue, item.start, item.stop, item.step))\n else:\n return 
self._queue[item]", "def valid(self, pos):\n\t\tpos = Point(pos)\n\t\treturn 0 <= pos.x < self.dims.width and 0 <= pos.y < self.dims.height" ]
[ "0.5930391", "0.5894886", "0.5701367", "0.56653273", "0.5539224", "0.54814374", "0.54136163", "0.54108244", "0.5387186", "0.5382678", "0.5344155", "0.5341929", "0.53009504", "0.5299111", "0.52924687", "0.5279965", "0.52784073", "0.52331346", "0.5224626", "0.51978904", "0.51833797", "0.5111612", "0.5106401", "0.5096898", "0.5058394", "0.5024388", "0.50093496", "0.5006892", "0.49975714", "0.49951485", "0.4989877", "0.4961322", "0.4925775", "0.49198663", "0.49106443", "0.48957443", "0.48877653", "0.48637393", "0.4851697", "0.48181507", "0.48159245", "0.48123258", "0.47949547", "0.47897282", "0.4780013", "0.47753456", "0.47740528", "0.4765586", "0.474841", "0.4744087", "0.47433165", "0.4741494", "0.47381908", "0.47371355", "0.47283435", "0.47279578", "0.47166893", "0.47053966", "0.46930337", "0.46708652", "0.46647975", "0.46505705", "0.46480334", "0.4643822", "0.46400806", "0.463954", "0.46255106", "0.4619932", "0.46160555", "0.46160072", "0.46114334", "0.460025", "0.45936653", "0.459345", "0.45877445", "0.45819238", "0.45781386", "0.45715597", "0.45567104", "0.45465735", "0.45446327", "0.45444256", "0.45423347", "0.45421013", "0.453283", "0.4530626", "0.45200038", "0.45200038", "0.45156822", "0.45140436", "0.4510183", "0.450437", "0.45037895", "0.45006725", "0.44985977", "0.44952235", "0.44927135", "0.44915867", "0.44880915", "0.44815022" ]
0.780188
0
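A minimal usage sketch for the `is_total_slice` predicate shown in this row's document field (the sketch itself is not part of the dataset row): when the index covers the whole chunk, a `__setitem__` implementation can skip reading and merging the existing chunk data and simply overwrite it. The `write_chunk` helper and the NumPy-backed chunk below are illustrative assumptions; only `is_total_slice`, as defined above, is assumed to be in scope.

import numpy as np

def write_chunk(chunk: np.ndarray, item, value) -> None:
    # Fast path: the index covers the entire chunk, so just fill it.
    if is_total_slice(item, chunk.shape):
        chunk[...] = value
    else:
        # Partial update: fall back to an ordinary indexed assignment.
        chunk[item] = value

chunk = np.zeros((10, 20))
assert is_total_slice(Ellipsis, (10, 20))
assert is_total_slice((slice(0, 10), slice(0, 20)), (10, 20))
assert not is_total_slice((slice(0, 5), slice(0, 20)), (10, 20))
write_chunk(chunk, (slice(0, 10), slice(0, 20)), 1.0)   # overwrites the whole chunk
write_chunk(chunk, (slice(0, 5), slice(0, 20)), 2.0)    # touches only the first 5 rows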
Make several attempts to invoke the callable. If one of the given exceptions is raised, wait the given period of time and retry up to the given number of retries.
def retry_call( callabl: Callable, args=None, kwargs=None, exceptions: Tuple[Any, ...] = (), retries: int = 10, wait: float = 0.1, ) -> Any: if args is None: args = () if kwargs is None: kwargs = {} for attempt in range(1, retries + 1): try: return callabl(*args, **kwargs) except exceptions: if attempt < retries: time.sleep(wait) else: raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call_with_retries(function, max_retries=10,\n exception_types=(Exception),\n _args=(), _kwargs={}):\n assert max_retries >= 0\n\n retries = 0\n last_exc = Exception('Unknown exception')\n while retries <= max_retries:\n try:\n return function(*_args, **_kwargs)\n except exception_types as exc:\n retries += 1\n wait = 2.0 ** retries * 0.1 + (random.randint(0, 1000) / 1000)\n time.sleep(wait)\n last_exc = exc\n raise last_exc", "def retry(callback, retries, sleep=0.5, catch=Exception, *args, **kwargs):\n r = 0\n while r < retries:\n r += 1\n try:\n return callback(*args, **kwargs)\n except catch as c:\n if r == retries:\n raise c\n else:\n time.sleep(r * sleep)", "def retry_exception(num, delay, func, exception=Exception, *args, **kwargs):\n i = 0\n while i <= num:\n try:\n func(*args, **kwargs)\n time.sleep(delay)\n except exception: # pylint: disable=broad-except\n i += 1\n continue\n return\n raise StopIteration(\"Function did not finished successfully\")", "def retry_multi(max_retries=5):\n\n def retry(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n num_retries = 0\n ret = None\n while num_retries <= max_retries:\n try:\n ret = func(*args, **kwargs)\n break\n except Exception as e:\n logger.exception(e)\n if num_retries == max_retries:\n raise\n num_retries += 1\n time.sleep(5)\n return ret\n\n return wrapper\n\n return retry", "def retry(func, *args, **kwargs):\n\n # config\n backoff = 1. + random.random() * 0.1\n max_backoff = 32\n max_retries = 5\n\n # try to make the request\n for i in range(max_retries):\n try:\n # return on success\n return func(*args, **kwargs)\n except Exception:\n # sleep on failure\n time.sleep(backoff)\n backoff = 2 * backoff if backoff < max_backoff else backoff\n \n # max retries exceeded\n raise RuntimeError('The connection to the server timed out.')", "def retry(times: int, except_callback: Optional[Callable[..., Any]] = None):\n\n def wrap(func):\n @wraps(func)\n def retry_it(*args, **kwargs):\n nonlocal times\n if times < 0: # forever\n times = 1 << 32\n\n for i in range(1, times + 1):\n try:\n r = func(*args, **kwargs)\n return r\n except Exception as err:\n if except_callback is not None:\n except_callback(err, i)\n\n if i == times:\n raise err\n\n return retry_it\n\n return wrap", "def _Retry(func, *args, **kwargs):\n retries = _RETRIES\n while True:\n try:\n return func(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n retries -= 1\n if retries > 0:\n log.info('Exception {e} thrown in {func}. Retrying.'.format(\n e=e, func=func.__name__))\n time.sleep(1)\n else:\n raise e", "def call_with_retries(function, retry_count, retry_delay):\n logger.info(\"Calling function: %s with retry count: %s, retry_delay: %s\",\n function, retry_count, retry_delay)\n for retry in range(1, int(retry_count) + 1):\n logger.info(\"Attempt number: %s\", retry)\n try:\n return function()\n # pylint: disable=broad-except\n except Exception as verify_exception:\n logger.info(\"Verify exception: %s\", verify_exception)\n time.sleep(float(retry_delay))\n if retry > int(retry_count):\n logger.info(\"Exceeded max retries! 
Reraising last exception\")\n raise\n assert False, \"Should never get here.\"", "def retry(nattempts, exception=None):\n \n def tryIt(func):\n def wrapper(*args, **kwargs):\n attempts = 0\n while attempts < nattempts - 1:\n try:\n return func(*args, **kwargs)\n except (exception if exception is not None else Exception):\n attempts += 1\n return func(*args, **kwargs)\n return wrapper\n return tryIt", "def retry(exceptions=Exception, tries=3, delay=1):\n\n def retry_decorator(func):\n def func_wrapper(*args, **kwargs):\n _tries = tries\n while _tries:\n try:\n return func(*args, **kwargs)\n except exceptions as e:\n _tries -= 1\n if not _tries:\n raise\n\n time.sleep(delay)\n\n return func_wrapper\n\n return retry_decorator", "def retry_on_exception(func, num_tries=40, period_in_seconds=DEFAULT_PERIOD,\n error=None):\n for x in range(num_tries):\n try:\n return func()\n except Exception as e:\n if error and e.error_code == error:\n logging.info(\"Skipping on exception %s\" % error)\n break\n if x == (num_tries - 1):\n raise RuntimeError(\"Failed on %d tries: %s\" % (num_tries, e))\n logging.info(\"Got exception %s on try number %s...\" % (e, x))\n\n time.sleep(period_in_seconds)", "def retry_on_exception(func, max_attempts=5, ignored_exceptions=(StaleElementReferenceException, InvalidElementStateException)):\r\n attempt = 0\r\n while attempt < max_attempts:\r\n try:\r\n return func()\r\n except ignored_exceptions:\r\n world.wait(1)\r\n attempt += 1\r\n\r\n assert_true(attempt < max_attempts, 'Ran out of attempts to execute {}'.format(func))", "def retry(times: int, on_exceptions: List[Exception]):\n def decorator(function: Callable):\n @wraps(function)\n def wrapper(*args, **kwargs):\n raised = []\n for _ in range(times):\n try:\n return function(*args, **kwargs)\n except Exception as ex:\n raised.append(ex)\n if type(ex) not in on_exceptions:\n raise RetryError(\n 'An unexpected error occurred while calling the function '+\n f'{function.__name__}.'\n ) from ex\n raise raised.pop()\n return wrapper\n return decorator", "def retry(retry_times=3, interval=0.5, exceptions=Exception):\n def _decorator(func):\n @wraps(func)\n def _wrapped_func(*args, **kwargs):\n for attempt in range(1, retry_times + 1):\n try:\n return func(*args, **kwargs)\n except exceptions: # pylint: disable=broad-except\n if attempt < retry_times:\n logger.debug(\"%s failed in No. %d attempt\", func, attempt)\n import traceback\n import time\n logger.debug(traceback.format_exc())\n time.sleep(interval)\n else:\n raise # End of retry. 
Re-raise the exception as-is.\n return _wrapped_func\n return _decorator", "def retry(maxRetries, *exceptions):\n def _doDecoration(fn):\n def _doRetry(*args, **kwargs):\n retries = 0\n while retries <= maxRetries:\n try:\n return fn(*args, **kwargs)\n except tuple(exceptions):\n retries +=1\n if retries > maxRetries:\n raise\n \n return _doRetry\n return _doDecoration", "def execute_with_retry(f, args=[], kwargs={}, retry_on=(Exception,),\n max_tries=3, sleep=5):\n attempt = 0\n result = None\n while attempt < max_tries:\n attempt += 1\n try:\n result = f(*args, **kwargs)\n break\n except retry_on, e:\n if attempt >= max_tries:\n raise e\n log(\"Function call failed ('%s': %i/%i).\\n\"\n \"Reason: %s.\\n\"\n \"Wait for %i sec before retry...\"\n % (f.__name__, attempt, max_tries, str(e), sleep))\n time.sleep(sleep)\n return result", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def wait_for(func, expected_exceptions=(), retries=60):\n\n retries = int(retries)\n for retry in range(1, retries + 1):\n try:\n return_value = func()\n if return_value:\n break\n\n except expected_exceptions:\n if retry == retries:\n raise\n else:\n pass\n\n time.sleep(1)\n\n return return_value", "def _RunWithRetries(self, callback, error_matcher):\n for i in xrange(FLAGS.gcloud_num_retries):\n try:\n return callback()\n except Exception as e: # pylint: disable=broad-except\n if not error_matcher(e):\n raise\n # Use randomized exponential backoff, like methods in\n # googleapiclient.http.\n retry_seconds = random.random() * 2**(i + 1)\n logging.warning('Request raised an error: %s\\n'\n 'Will retry in %f seconds.', e, retry_seconds)\n time.sleep(retry_seconds)\n\n return callback()", "def retry(func):\n # ... retry MAX_RETRIES times\n # ...\n # make sure you include this for testing:\n # except Exception as exc:\n # print(exc)\n # ...\n # and use wraps to preserve docstring\n #\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n tries = MAX_RETRIES\n while tries > 0:\n try:\n return func(*args, **kwargs)\n except Exception as err:\n print(err)\n\n tries -= 1\n\n raise MaxRetriesException\n\n return wrapper", "def __try_to_run(self, func, args, exceptions=(OSError, PhueException), amount=10,\n sleep=2):\n for _ in range(amount):\n try:\n return func(*args)\n except exceptions as e:\n log.debug(f'Try to run failed, sleeping {sleep}s, {e}')\n time.sleep(sleep)\n log.info(f'Failed to run {func.__name__} with args {args}')\n return None", "def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)", "def _retry(method, max_tries=5, backoff_s=1):\n\n @wraps(method)\n def method_with_retries(self, *args, **kwargs):\n try_count = 0\n while try_count < max_tries:\n try:\n return method(self, *args, **kwargs)\n except BrokenPipeError:\n logger.warning(\"Caught a BrokenPipeError. 
Retrying.\")\n try_count += 1\n if try_count < max_tries:\n self._construct_clients()\n time.sleep(backoff_s)\n else:\n raise\n\n return method_with_retries", "def retry_query(tries=3, delay=1):\n\n def retry_wrapper(func):\n \"\"\"Wrapper function.\n :params func: function to call\n :return: wrapper function\n \"\"\"\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n \"\"\"Inner wrapper function\n :params *args: list of different arguments\n *kwargs: dictionary of different arguments\n \"\"\"\n\n mtries = tries\n mdelay = delay\n\n while mtries:\n try:\n return func(*args, **kwargs)\n except Exception: # pylint: disable=broad-except\n if mtries:\n time.sleep(mdelay)\n mtries -= 1\n\n return inner\n\n return retry_wrapper", "def retryCall(fn, args=None, keywordArgs=None, failureTester=None, sleepManager=None):\n sleepManager = sleepManager or time.SleepManager()\n while True:\n try:\n result = yield fn(*args, **keywordArgs)\n defer.returnValue(result)\n except Exception: # pylint: disable=W0703\n failureTester(failure.Failure())\n yield sleepManager.sleep()", "def retry(\n self, n: int, /, *args, error: Catchable = Exception, sleep=None, **kwargs\n ) -> \"fn\":\n\n func = self._mod.retry(n, self, error=error, sleep=sleep)\n return func(*args, **kwargs)", "def retry(exception_to_check=AssertionError, tries=100, delay=.1):\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 0:\n try:\n return f(*args, **kwargs)\n except exception_to_check, e:\n log.info('%s, Retrying in %s seconds...' % (str(e), mdelay))\n time.sleep(mdelay)\n mtries -= 1\n try_time = float(tries*delay)\n raise exception_to_check('tried for %1.1f seconds, gave up' % try_time)\n return f_retry\n return deco_retry", "def _poll_until_no_exception(self, fn, expected_exception, max_retries=20, retry_delay=3):\n\n for i in range(max_retries):\n try:\n return fn()\n except expected_exception:\n if i == max_retries - 1:\n raise\n if self.is_live:\n time.sleep(retry_delay)", "def retry(exception_to_check, tries=4, delay=0.5, backoff=2):\n\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n try_one_last_time = True\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n try_one_last_time = False\n break\n except exception_to_check, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n logging.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n if try_one_last_time:\n return f(*args, **kwargs)\n return\n return f_retry\n return deco_retry", "def retry(exceptions, tries=3, delay=2, _logger=logger()):\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exceptions as e:\n msg = '{}, Retrying in {} seconds...'.format(e, mdelay)\n _logger.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(action, attempts=5, sleeptime=60, max_sleeptime=5 * 60,\n sleepscale=1.5, jitter=1, retry_exceptions=(Exception,),\n cleanup=None, args=(), kwargs={}, log_args=True):\n assert callable(action)\n assert not cleanup or callable(cleanup)\n\n action_name = getattr(action, '__name__', action)\n if log_args and (args or kwargs):\n log_attempt_args = (\"retry: calling %s with args: %s,\"\n \" kwargs: %s, attempt #%d\",\n action_name, args, kwargs)\n else:\n log_attempt_args = (\"retry: calling %s, attempt #%d\",\n action_name)\n\n if 
max_sleeptime < sleeptime:\n log.debug(\"max_sleeptime %d less than sleeptime %d\",\n max_sleeptime, sleeptime)\n\n n = 1\n for _ in retrier(attempts=attempts, sleeptime=sleeptime,\n max_sleeptime=max_sleeptime, sleepscale=sleepscale,\n jitter=jitter):\n try:\n logfn = log.info if n != 1 else log.debug\n logfn_args = log_attempt_args + (n, )\n logfn(*logfn_args)\n return action(*args, **kwargs)\n except retry_exceptions:\n log.debug(\"retry: Caught exception: \", exc_info=True)\n if cleanup:\n cleanup()\n if n == attempts:\n log.info(\"retry: Giving up on %s\", action_name)\n raise\n continue\n finally:\n n += 1", "def _poll_until_exception(self, fn, expected_exception, max_retries=20, retry_delay=3):\n\n for _ in range(max_retries):\n try:\n fn()\n if self.is_live:\n time.sleep(retry_delay)\n except expected_exception:\n return\n\n self.fail(\"expected exception {expected_exception} was not raised\")", "def exp_backoff_fn(fn, *args):\n if not on_win:\n return fn(*args)\n\n import time\n import errno\n max_tries = 6 # max total time = 6.4 sec\n for n in range(max_tries):\n try:\n result = fn(*args)\n except (OSError, IOError) as e:\n if e.errno in (errno.EPERM, errno.EACCES):\n if n == max_tries - 1:\n raise Exception(\"max_tries=%d reached\" % max_tries)\n time.sleep(0.1 * (2 ** n))\n else:\n raise e\n else:\n return result", "def do_a_thing_with_retries(function, max_tries: int, *args: Any) -> Any:\n\n run_results = {}\n func_return = None\n has_fails = False\n\n for tries in range(max_tries):\n try:\n if(tries < max_tries):\n if (args):\n func_return = function(*args)\n else:\n func_return = function()\n run_results[tries] = {\"status\": \"success\", \"error\": \"\"}\n break\n except Exception as ex:\n run_results[tries] = {\"status\": \"fail\", \"error\": ex}\n has_fails = True\n next\n \n if has_fails:\n raise FunctionFailedException(\"Failed\", run_results)\n return func_return", "def ensure_redis_call(f, *args, **kwargs):\n attempts = kwargs.pop('attempts', 5)\n\n for i in six.moves.range(attempts + 1):\n try:\n return f(*args, **kwargs)\n\n except (ConnectionError, TimeoutError) as e:\n if i == attempts:\n raise\n else:\n wait = 2 ** i\n msg = (\n 'Will reattempt to execute {} with args={} kwargs={} '\n 'after {} seconds due to exception {}: {}'\n ''.format(f, args, kwargs, wait, type(e).__name__, e)\n )\n print(msg)\n time.sleep(wait)", "def retry(ExceptionToCheck, tries=3, delay=3, backoff=2):\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n logging.warning('%s, Retrying in %d seconds...', str(e), mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry\n\n return deco_retry", "def _timeout_retry(func, *args, **kwargs):\n tried = kwargs.pop('_____retires', 0)\n try:\n q = func(*args, **kwargs)\n except (TimeoutError, TableParseError) as exc:\n if tried >= MAX_RETRIES_TIMEOUT:\n raise TimeoutError(f'TimeOut obtained in {MAX_RETRIES_TIMEOUT}'\n ' tries, aborting.') from exc\n return _timeout_retry(func, *args, **kwargs, _____retires=tried+1)\n return q", "def retry(exception, tries=10, delay=1, backoff=2, max_delay=30):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n m_tries, m_delay = tries, delay\n while m_tries > 1:\n try:\n return f(*args, **kwargs)\n except exception:\n time.sleep(min(m_delay, max_delay))\n m_tries -= 1\n m_delay *= backoff\n return f(*args, **kwargs)\n 
return f_retry # true decorator\n return deco_retry", "def retry(tries, delay=3, backoff=2):\n tries = math.floor(tries)\n if tries < 0:\n raise ValueError(\"tries must be 0 or greater\")\n\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay # make mutable\n err = None\n while mtries > 0:\n print(\"Trial Number:\" + str(mtries))\n try:\n rv = f(*args, **kwargs)\n except DBException as e:\n print(\"Retry..\")\n mtries -= 1 # consume an attempt\n time.sleep(mdelay) # wait...\n mdelay += backoff # make future wait longer\n err = e\n\n # except Exception as e:\n # print(str(e))\n # mtries -= 1 # consume an attempt\n # time.sleep(mdelay) # wait...\n # mdelay += backoff # make future wait longer\n # err = e\n else:\n return rv\n raise err\n\n return f_retry # true decorator -> decorated function\n\n return deco_retry # @retry(arg[, ...]) -> true decorator", "def retry(retries, task_f, check_f=bool, wait_f=None):\n for attempt in range(retries):\n ret = task_f()\n if check_f(ret):\n return ret\n if attempt < retries - 1 and wait_f is not None:\n wait_f(attempt)\n raise RetryException(\"Giving up after {} failed attempt(s)\".format(retries))", "def url_socket_retry(func, *args, **kw):\n min_delay = 1\n max_delay = 32\n max_attempts = 4\n\n for idx, delay in enumerate(\n backoff_delays(min_delay, max_delay, jitter=True)):\n try:\n return func(*args, **kw)\n except HTTPError as err:\n if not (err.status == 503 and 'Slow Down' in err.reason):\n raise\n if idx == max_attempts - 1:\n raise\n except URLError as err:\n if not isinstance(err.reason, socket.error):\n raise\n if err.reason.errno not in (104, 110):\n raise\n if idx == max_attempts - 1:\n raise\n\n time.sleep(delay)", "def retry(attempts_number, delay=0, step=0, max_delay=-1,\n retry_on=Exception, logger=None):\n\n def decorator(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n current_logger = logger\n\n attempts = 1\n retry_delay = delay\n\n try:\n if isinstance(args[0], object):\n current_logger = args[0].get_logger()\n except (AttributeError, IndexError):\n pass\n\n if isinstance(retry_on, (types.FunctionType,\n types.MethodType,)):\n catch_strategy = CatchFunctionStrategy(retry_on)\n else:\n catch_strategy = CatchExceptionStrategy(retry_on)\n\n while attempts <= attempts_number or attempts_number < 0:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if catch_strategy.need_to_retry(e):\n if attempts >= attempts_number >= 0:\n raise\n elif current_logger:\n retry_count = \"inf\" if attempts_number < 0 \\\n else attempts_number - 1\n\n current_logger.warning(\n \"Retry: Call to %(fn)s failed due to \"\n \"%(exc_class)s: %(exc)s, retry \"\n \"attempt #%(retry_no)s/\"\n \"%(retry_count)s after %(delay)ss\",\n dict(fn=func.__name__,\n exc=str(e),\n retry_no=attempts,\n exc_class=e.__class__.__name__,\n retry_count=retry_count,\n delay=retry_delay))\n time.sleep(retry_delay)\n attempts += 1\n retry_delay += step\n if 0 <= max_delay < retry_delay:\n retry_delay = max_delay\n else:\n raise\n return wrapper\n return decorator", "def _retry_on_exception(\n exception: Union[Exception, Tuple[Exception]],\n regex: Optional[str] = None,\n max_retries: int = MAX_POLLS,\n retry_interval_s: int = POLL_INTERVAL,\n):\n\n def dec(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n def try_catch_exc():\n try:\n value = func(*args, **kwargs)\n return value\n except Exception as e:\n if not isinstance(e, exception) or (\n regex and not re.search(regex, str(e))\n ):\n raise e\n 
return e\n\n for _ in range(max_retries):\n ret = try_catch_exc()\n if not isinstance(ret, Exception):\n break\n time.sleep(retry_interval_s)\n if isinstance(ret, Exception):\n raise ret\n return ret\n\n return wrapper\n\n return dec", "def retry(func, repeat=3, delay=tickTime * 2):\n\twhile repeat:\n\t\tresult = func()\n\n\t\tif result is None and delay and repeat != 1:\n\t\t\tsleep(delay)\n\n\t\telse:\n\t\t\treturn result\n\n\t\trepeat -= 1", "def retry(tries, delay=3, backoff=2, except_on=(Exception, )):\n\n tries = math.floor(tries)\n\n def decorator(f):\n def f_retry(*args, **kwargs):\n return function_retry(\n tries, delay, backoff, except_on, f, *args, **kwargs)\n return f_retry # true decorator -> decorated function\n return decorator # @retry(arg[, ...]) -> true decorator", "def retry(ExceptionToCheck, tries=3, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print msg\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(exception, tries=10, delay=3, backoff=0.1):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exception as ex:\n print \"{0}, Retrying in {1} seconds...\".format(ex, mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n return f_retry # true decorator\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def decorated(instance, *args, **kwargs):\n cfg = instance._retry_config\n remaining_tries = cfg.retry_attempts\n current_wait = cfg.retry_wait\n retry_backoff = cfg.retry_backoff\n last_error = None\n\n while remaining_tries >= 0:\n try:\n return fun(instance, *args, **kwargs)\n except socket.error as e:\n last_error = e\n instance._retry_logger.warning('Connection failed: %s', e)\n\n remaining_tries -= 1\n if remaining_tries == 0:\n # Last attempt\n break\n\n # Wait a bit\n time.sleep(current_wait)\n current_wait *= retry_backoff\n\n # All attempts failed, let's raise the last error.\n raise last_error", "def retry_request(self, method, action, body=None,\r\n headers=None, params=None):\r\n max_attempts = self.retries + 1\r\n for i in range(max_attempts):\r\n try:\r\n return self.do_request(method, action, body=body,\r\n headers=headers, params=params)\r\n except exceptions.ConnectionFailed:\r\n # Exception has already been logged by do_request()\r\n if i < self.retries:\r\n _logger.debug(_('Retrying connection to Neutron service'))\r\n time.sleep(self.retry_interval)\r\n\r\n raise exceptions.ConnectionFailed(reason=_(\"Maximum attempts reached\"))", "def retrying(func, *retry_args, **retry_kwargs):\n yield retriable(*retry_args, **retry_kwargs)(func)", "def _retry(func):\n 
@wraps(func)\n def _retry_wrapper(self, *args, **kwargs):\n error_message = \"\"\n for retry in range(self.retries + 1):\n try:\n return func(self, *args, **kwargs)\n except ValueError as err:\n error_message = str(err)\n raise ValueError(str(error_message))\n return _retry_wrapper", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print msg\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\r\n def deco_retry(f):\r\n\r\n @wraps(f)\r\n def f_retry(*args, **kwargs):\r\n mtries, mdelay = tries, delay\r\n while mtries > 1:\r\n try:\r\n return f(*args, **kwargs)\r\n except ExceptionToCheck as e:\r\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\r\n if logger:\r\n logger.warning(msg)\r\n else:\r\n print (msg)\r\n time.sleep(mdelay)\r\n mtries -= 1\r\n mdelay *= backoff\r\n return f(*args, **kwargs)\r\n return f_retry # true decorator\r\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\r\n def deco_retry(f):\r\n\r\n @wraps(f)\r\n def f_retry(*args, **kwargs):\r\n mtries, mdelay = tries, delay\r\n while mtries > 1:\r\n try:\r\n return f(*args, **kwargs)\r\n except ExceptionToCheck, e:\r\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\r\n if logger:\r\n logger.warning(msg)\r\n else:\r\n print msg\r\n time.sleep(mdelay)\r\n mtries -= 1\r\n mdelay *= backoff\r\n return f(*args, **kwargs)\r\n\r\n return f_retry # true decorator\r\n\r\n return deco_retry", "def patiently(function, exception_to_catch, exception_to_raise=None, msg=\"\", seconds=15):\n if not exception_to_raise:\n exception_to_raise = exception_to_catch\n attempts = 15\n i = 0\n print \"Execute function %s patiently\" % function.__name__\n while i < attempts:\n try:\n return function()\n except exception_to_catch:\n print i\n sleep( float(seconds) / float(attempts))\n i += 1\n raise exception_to_raise(msg)", "def run(self, func, *args, **kwargs):\n try:\n ret = func(*args, **kwargs)\n\n if not self._should_handle_return(ret, *args, **kwargs):\n return ret\n except Exception as e:\n if not self._should_handle_error(e, *args, **kwargs):\n raise\n\n if self._on_delay is None:\n raise MaxRetryError('Maximum number of retries exceeded for {0}'.format(self._get_func_name(func)))\n\n retries = 0\n for delay in self._get_delay_sequence(*args, **kwargs):\n retries += 1\n\n if self._should_handle_retry(False):\n self._call_with_sig(self._on_retry, self._sig_retry, (delay, retries), *args, **kwargs)\n\n sleep(delay / 1000)\n\n if self._should_handle_retry(True):\n self._call_with_sig(self._on_retry, 
self._sig_retry, (delay, retries), *args, **kwargs)\n\n try:\n ret = func(*args, **kwargs)\n\n if not self._should_handle_return(ret, *args, **kwargs):\n return ret\n except Exception as e:\n if not self._should_handle_error(e, *args, **kwargs):\n raise\n\n raise MaxRetryError('Maximum number of retries exceeded for {0}'.format(self._get_func_name(func)))", "def retry_wait_backoff(fn_check, fail_msg, max_wait=20):\n sleep_time = 0.1\n total_waited = 0.0\n while total_waited < max_wait:\n if fn_check():\n break\n log.info('{0}, retrying in {1:.2f}s'.format(fail_msg, sleep_time))\n total_waited += sleep_time\n time.sleep(sleep_time)\n sleep_time = min(sleep_time * 2, 5, max_wait - total_waited)\n else:\n raise TimeoutError('{0} after {1:.2f}s'.format(fail_msg, max_wait))", "def retry_wait_backoff(fn_check, fail_msg, max_wait=20):\n sleep_time = 0.1\n total_waited = 0.0\n while total_waited < max_wait:\n if fn_check():\n break\n log.info('{0}, retrying in {1:.2f}s'.format(fail_msg, sleep_time))\n total_waited += sleep_time\n time.sleep(sleep_time)\n sleep_time = min(sleep_time * 2, 5, max_wait - total_waited)\n else:\n raise TimeoutError('{0} after {1:.2f}s'.format(fail_msg, max_wait))", "def test_retry(self):\n retries = [0]\n max_tries = 5\n\n @retry(Exception, max_retries=5)\n def f():\n retries[0] += 1\n raise Exception(\"Faulty function\")\n\n with self.assertRaises(Exception):\n f()\n\n self.assertEqual(max_tries, retries[0])", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n\tdef deco_retry(f):\n\t\t@wraps(f)\n\t\tdef f_retry(*args, **kwargs):\n\t\t\tmtries, mdelay = tries, delay\n\t\t\twhile mtries > 1:\n\t\t\t\ttry:\n\t\t\t\t\treturn f(*args, **kwargs)\n\t\t\t\texcept ExceptionToCheck, e:\n\t\t\t\t\tmsg = \"func: '{}' > exc: {}, Retrying in {} seconds...\".format(str(f.__name__), str(e), mdelay)\n\t\t\t\t\tif logger:\n\t\t\t\t\t\tlogger.warning(msg)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint msg\n\t\t\t\t\ttime.sleep(mdelay)\n\t\t\t\t\tmtries -= 1\n\t\t\t\t\tmdelay *= backoff\n\t\t\treturn f(*args, **kwargs)\n\t\treturn f_retry\t# true decorator\n\treturn deco_retry", "def auto_retry(fun):\n\n @functools.wraps(fun)\n def decorated(instance, *args, **kwargs):\n \"\"\"Wrapper around a decorated function.\"\"\"\n cfg = instance._retry_config\n remaining_tries = cfg.retry_attempts\n current_wait = cfg.retry_wait\n retry_backoff = cfg.retry_backoff\n last_error = None\n\n while remaining_tries >= 0:\n try:\n return fun(instance, *args, **kwargs)\n except socket.error as e:\n last_error = e\n instance._retry_logger.warning('Connection failed: %s', e)\n\n remaining_tries -= 1\n if remaining_tries == 0:\n # Last attempt\n break\n\n # Wait a bit\n time.sleep(current_wait)\n current_wait *= retry_backoff\n\n # All attempts failed, let's raise the last error.\n raise last_error\n\n return decorated", "def _retry(self, f):\n count = 0\n while True:\n try:\n return f()\n # http://initd.org/psycopg/docs/module.html#psycopg2.DatabaseError\n # handle operational error - memory allocation, unexpected disconnect\n except psycopg2.OperationalError, oe:\n count += 1\n if count < self._max_retries:\n LOGGER.warn(\"Transient Error Received %s \", oe)\n time.sleep(self._retry_period)\n else:\n LOGGER.error(\"Unrecoverable Error %s\", oe)\n raise oe\n # other database errors - integrity, internal, programming error etc\n except psycopg2.DatabaseError, de:\n LOGGER.error(\"Database Error %s\", de)\n raise de\n # interface errors\n except psycopg2.Error, e:\n raise e", "def 
retryable(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n retries = 0\n max_retries = kwargs.get(\"max_retries\", DEFAULT_RETRIES)\n backoff = kwargs.get(\"backoff\", default_backoff)\n while retries <= max_retries:\n try:\n return func(*args, **kwargs)\n except IntegrityError:\n logging.debug(\n \"Race-condition caught? ({}/{} retries)\".format(retries, max_retries)\n )\n if retries >= max_retries:\n logging.error(f\"Unable to execute {func}, max retries exceeded\")\n raise\n retries += 1\n backoff(retries, max_retries)\n\n return wrapper", "def retry_on_refuse(f, *args, **kwargs):\n i = 0\n while True:\n try:\n i += 1\n f(*args, **kwargs)\n break\n except (OSError, socket.error) as e:\n if e.args[0] != socket.errno.ECONNREFUSED or i > 10000:\n raise\n else:\n time.sleep(0.001)", "def retry(retries=5):\n\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n count = 0\n while True:\n try:\n return fn(*args, **kwargs)\n except (\n BadRequest,\n BadResponseException,\n ReadTimeout,\n RequestException,\n TraktBadGateway,\n TraktUnavailable,\n TraktInternalException,\n ) as e:\n if count == retries:\n logger.error(f\"Error: {e}\")\n\n if isinstance(e, BadResponseException):\n logger.error(f\"Details: {e.details}\")\n if isinstance(e, TraktInternalException):\n logger.error(f\"Error message: {e.error_message}\")\n\n logger.error(\n \"API didn't respond properly, script will abort now. Please try again later.\"\n )\n logger.error(\n f\"Last call: {fn.__module__}.{fn.__name__}({args[1:]}, {kwargs})\"\n )\n exit(1)\n\n seconds = 1 + count\n count += 1\n logger.warning(\n f\"{e} for {fn.__module__}.{fn.__name__}(), retrying after {seconds} seconds (try: {count}/{retries})\"\n )\n sleep(seconds)\n\n return wrapper\n\n return decorator", "def _retry_request(self, request, timeout=2, attempts=3):\n import googleapiclient\n\n try:\n return request.execute()\n except BrokenPipeError as ex:\n if attempts > 0:\n time.sleep(timeout)\n return self._retry_request(request, timeout * 2, attempts - 1)\n raise ex\n except googleapiclient.errors.HttpError as ex:\n log_verbose_traceback(ex)\n raise ex\n except Exception as ex:\n log_verbose_traceback(ex)\n raise ex", "def retryConnection(func, *args, **kw):\n try_num = 0\n while True:\n try:\n web_file = func(*args, **kw)\n break\n except IOError:\n try_num += 1\n if try_num >= retry:\n raise IOError\n if p_out:\n print(\"retry connection...\")\n os.system('sleep ' + str(sleeptime))\n return web_file", "def wrapped_fn(*args, **kwargs):\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)", "def wrap(fn):\n\n def wrapped_fn(*args, **kwargs):\n \"\"\"The actual wrapper function that applies the retry logic.\"\"\"\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)\n\n return wrapped_fn", "def _call_list_with_retry(urls, method, payload, headers, auth, proxies,\n retries, timeout=None, stream=None):\n if timeout is None:\n if method == 'get':\n timeout = _DEFAULT_REQUEST_TIMEOUT\n else:\n timeout = None\n\n retry = 0\n attempts = []\n while True:\n success, response = _call_list(\n urls, method, payload, headers, auth, proxies,\n timeout=(_DEFAULT_CONNECT_TIMEOUT + 
retry, timeout),\n stream=stream\n )\n if success:\n return response\n\n retry += 1\n attempts.extend(response)\n if retry >= retries:\n raise MaxRequestRetriesError(attempts)\n\n time.sleep(1)", "def retry(times,function,*args,**kwargs):\n app_conntext=kwargs.get(\"app-context\")\n while times>0:\n times-=1\n try:\n if app_conntext:\n with app_conntext.app_context() as actx:\n function(*args)\n else:\n function(*args,**kwargs)\n print(function.__name__,\"Ran sucessfully with args \",args,\" and \",kwargs)\n return True # no exception\n except Exception as e:\n #can be any exception so try again\n print(function.__name__,\"exception \",e)\n print(function.__name__,\"failed with \",args,\" and \",kwargs)\n return False", "def Retry_timer(interval=3, retry_times=3):\n def retry_timer(func):\n @wraps(func)\n def wrapper(count=1, interval=interval, retry_times=retry_times, *args, **kwargs):\n try:\n logger.debug(f'Try func:{func.__name__} {count} times.')\n return func(*args, **kwargs)\n except Exception as e:\n logger.warning(f'There have some error: {e}')\n count += 1\n if count <= retry_times:\n logger.debug(f'Will retry in {interval} sec.')\n time.sleep(interval)\n return wrapper(count=count, interval=interval, retry_times=retry_times, *args, **kwargs)\n else:\n logger.critical(f'Failed to execute func:{func.__name__}')\n return wrapper\n return retry_timer", "def set_retry_timeout(self, retry_timeout):", "def _retry(*, task, signature_kwargs, retries):\n if retries < MAX_RETRIES:\n step = task.signature(**signature_kwargs)\n queue = step.options.get(\"queue\", task.queue)\n step.options[\"queue\"] = f\"{queue}-delay\"\n step.kwargs[\"retries\"] = retries + 1\n on_commit(step.apply_async)\n else:\n raise MaxRetriesExceededError", "async def _retry_get(url: str, retries: int, **kwargs):\r\n retries -= 1\r\n if retries >= 0:\r\n logger.warning(\r\n f\"Retrying request to {url}. Retries remaining: {retries}\")\r\n return await asyncio.create_task(\r\n self.get(url, retries, **kwargs))\r\n logger.error(\r\n f\"Max retries exceeded: {url}. 
URL can not be navigated.\")", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n try_one_last_time = True\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n try_one_last_time = False\n break\n except ExceptionToCheck, e:\n if logger:\n msg = getMessage(\"en\", \"retrying-notification\").format(str(e), mdelay)\n logger.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n if try_one_last_time:\n return f(*args, **kwargs)\n return\n return f_retry # true decorator\n return deco_retry", "def test_retry_other_exception():\n\n exceptions_in = [\n RuntimeError(\"what?\"),\n NotImplementedError(\"how?\"),\n RuntimeError(\"no!\"),\n ]\n actual = []\n expected = [1.0, 1.5, 2.25]\n\n def sleep(wait: float):\n actual.append(wait)\n\n @retry(\n (NotImplementedError, RuntimeError),\n retries=4,\n delay=1.0,\n backoff=1.5,\n sleep=sleep,\n )\n def explode():\n raise exceptions_in.pop()\n\n try:\n explode()\n raise AssertionError(\"IndexError expected\")\n except IndexError:\n assert actual == expected", "def default_backoff(retries, max_retries):\n\n time.sleep(random.random() * (max_retries - retries) / max_retries * 2)", "def retry_task(func):\n\n @wraps(func)\n def wrapper(task, *args, **kwargs):\n retries = task.request.retries\n exponential = 2 ** retries\n exponential_backoff = random.randint(exponential, exponential * 2)\n try:\n result = func(task, *args, **kwargs)\n except Exception as e:\n logger.error(\n f\"Retriying {task.request.id} after {exponential_backoff} seconds\"\n )\n raise task.retry(countdown=exponential_backoff, exc=e, max_retries=5)\n\n return result\n\n return wrapper", "async def _wait_retry(self) -> None:\n # Sleep 2^tries + 0…tries*3 seconds between retries\n self.retry_task = asyncio.create_task(\n asyncio.sleep(2 ** min(9, self.tries) + random.randint(0, self.tries * 3))\n )\n await self.retry_task\n self.retry_task = None", "def wait_through_exception_then_return(\n exc, func, func_args=None, func_kwargs=None, timeout_secs=5, poll_secs=.5): # noqa\n\n start_time = datetime.now()\n curr_time = datetime.now()\n\n func_args = func_args or []\n func_kwargs = func_kwargs or {}\n\n while (curr_time - start_time).total_seconds() < timeout_secs:\n try:\n result = func(*func_args, **func_kwargs)\n return result\n except exc as wde:\n time.sleep(poll_secs)\n curr_time = datetime.now()\n raise SlimleafException(f'Exception {exc} did not subside as expected')", "def _retry_provider_call(self, func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n max_retries = 29\n attempts = 0\n while attempts < max_retries:\n try:\n return func(*args, **kwargs)\n except ClientError as e:\n attempts += 1\n raise RetryLimitExceededError(\n \"Exceeded request limit {} times. Aborting.\".format(max_retries)\n )\n return decorated", "def run_selenium_routine(func):\n retries = 0\n\n while retries < 3:\n try:\n return func()\n except Exception as ex:\n self.logger.error(\n 'Selenium error #%s: %s', retries + 1, ex,\n exc_info=True)\n\n retries += 1\n continue", "def _test_retry_after_unlimited_retry_error(self, exception):\r\n num_emails = 8\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n # Note that because celery in eager mode will call retries synchronously,\r\n # each retry will increase the stack depth. 
It turns out that there is a\r\n # maximum depth at which a RuntimeError is raised (\"maximum recursion depth\r\n # exceeded\"). The maximum recursion depth is 90, so\r\n # num_emails * expected_retries < 90.\r\n expected_retries = 10\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Cycle through N throttling errors followed by a success.\r\n get_conn.return_value.send_messages.side_effect = cycle(\r\n chain(repeat(exception, expected_retries), [None])\r\n )\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_nomax=(expected_retries * num_emails)\r\n )", "def _retry_refresh(wrapper, *a3, **k3):\n return func(wrapper, *a3, **k3)", "def retries(self, count: int):\n if count < 0:\n raise ValueError(\"negative\")\n\n self._retries = count", "def retry_until_timeout(\n exception: Type[Exception],\n item: Callable[..., Any],\n timeout: int,\n args: List[Any] = None,\n kwargs: Mapping[str, Any] = None,\n interval: float = 0.05,\n raise_on_timeout: bool = True,\n) -> Any:\n timeout_info = TimeoutExceptionInfo()\n end_time = timeout_info.started + timeout\n while True:\n try:\n res = item(*args or tuple(), **kwargs or {})\n except exception as exc:\n if time.time() < end_time:\n # no timeout yet\n time.sleep(interval)\n else:\n if raise_on_timeout:\n raise TimeoutException(\n \"Timeout waiting for {0}\"\n \" to return without {1}. {2}. {3}\".format(\n item.__name__,\n exception.__name__,\n timeout_info.msg(),\n str(exc),\n )\n )\n else:\n return None\n else:\n return res", "def try_n(\r\n n: int, sleep: Union[Callable[..., Any], int] = None, out: Any = Nothing\r\n) -> Any:\r\n ...", "def __init__(self, tries , exceptions=None, delay=0.01):\n self.tries = tries\n if exceptions is None:\n exceptions = Retry.default_exceptions\n self.exceptions = exceptions\n self.delay = delay", "def i2c_retry(n):\n def decorator(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n for _ in range(n-1):\n try:\n return func(*args, **kwargs)\n except OSError:\n time.sleep(0.05) # <-- allow the I2C bus to chill-out before we try again\n return func(*args, **kwargs)\n\n return func_wrapper\n\n return decorator", "def call_api(func: Callable[..., requests.Response], args: dict) -> Dict:\n for _ in range(CONFIG['dojot']['api']['retries'] + 1):\n try:\n res = func(**args)\n res.raise_for_status()\n\n except Exception as exception:\n LOGGER.debug(str(exception))\n gevent.sleep(CONFIG['dojot']['api']['time'])\n\n else:\n return res.json()\n\n raise APICallError(\"exceeded the number of retries to {0}\".format(args['url']))", "def backoff(\n max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,\n delay=constants.BACKOFF_DEFAULT_DELAY,\n factor=constants.BACKOFF_DEFAULT_FACTOR,\n exception_handler=always_retry,\n before_delay_handler=noop,\n after_delay_handler=noop):\n if max_tries <= 0:\n raise ValueError((\n 'Max tries must be greater than 0; got {!r}'\n ).format(max_tries))\n\n if delay <= 0:\n raise ValueError((\n 'Delay must be greater than 0; got {!r}'\n ).format(delay))\n\n if factor <= 1:\n raise ValueError((\n 'Backoff factor must be greater than 1; got {!r}'\n ).format(factor))\n\n def outter(f):\n def inner(*args, **kwargs):\n m_max_tries, m_delay = max_tries, delay # make mutable\n while m_max_tries > 0:\n try:\n retval = f(*args, **kwargs)\n except Exception as ex:\n m_max_tries -= 1 # consume an attempt\n if m_max_tries < 0:\n # run out of tries\n raise\n if 
exception_handler(ex):\n logger.info(\n (\n 'backoff retry for: %r (max_tries=%r, '\n 'delay=%r, factor=%r)'\n ),\n f,\n max_tries,\n delay,\n factor\n )\n before_delay_handler(ex)\n time.sleep(m_delay) # wait...\n after_delay_handler(ex)\n m_delay *= factor # make future wait longer\n else:\n # exception handler gave up\n raise\n else:\n # done without errors\n return retval\n return inner\n return outter", "def aws_cmd ( cmd, args, num_retries = 20 ) :\n while True :\n try :\n return cmd( *args )\n except :\n num_retries -= 1\n if num_retries < 1 :\n raise\n time.sleep( 1 )", "def test_run_max_retries():\n responses = [httpretty.Response(body=\"Internal Server Error\", status=500),\n httpretty.Response(body=\"Internal Server Error\", status=500),\n httpretty.Response(body=\"Internal Server Error\", status=500)]\n httpretty.register_uri(httpretty.GET, URL, responses=responses)\n with mock.patch('httsleep.main.sleep'):\n httsleep = HttSleep(URL, {'status_code': 200}, max_retries=2)\n with pytest.raises(StopIteration):\n httsleep.run()", "def request(self, *args, **kwargs):\n\n ratelimit_retries, temporary_error_retries, ident_retries = 0, 0, {}\n\n while True:\n try:\n try:\n return self._request(*args, **kwargs)\n except Exception as exc:\n self.error_processor(exc)\n raise\n\n except Retry as exc:\n ident_retries.setdefault(exc.retry_ident, 0)\n ident_retries[exc.retry_ident] += 1\n if ident_retries[exc.retry_ident] <= exc.retry_count:\n self.logger.warning('Retry(%s) after calls(%s/%s) since(%s) on: %s',\n ident_retries[exc.retry_ident], self.calls_count,\n self.calls_elapsed_seconds, self.first_call_time,\n exc.retry_ident)\n if exc.wait_seconds:\n self.sleep(exc.wait_seconds,\n log_reason='retry request: {}'.format(exc.retry_ident))\n else:\n raise self.RetryExceeded(\n exc.result, retry_ident=exc.retry_ident, retry_count=exc.retry_count)\n\n except RatelimitError as exc:\n ratelimit_retries += 1\n if ratelimit_retries <= self.ratelimit_retries:\n self.logger.warning('Retry(%s) after calls(%s/%s) since(%s) on error: %r',\n ratelimit_retries, self.calls_count,\n self.calls_elapsed_seconds, self.first_call_time, exc)\n self.sleep(exc.wait_seconds is not None and exc.wait_seconds\n or self.ratelimit_wait_seconds,\n log_reason='ratelimit wait')\n else:\n if ratelimit_retries - 1:\n raise self.RetryExceeded(exc, retry_count=ratelimit_retries - 1)\n raise\n\n except TemporaryError as exc:\n temporary_error_retries += 1\n if temporary_error_retries <= self.temporary_error_retries:\n self.logger.debug('Retry(%s) after calls(%s/%s) since(%s) on error: %r',\n temporary_error_retries, self.calls_count,\n self.calls_elapsed_seconds, self.first_call_time, exc)\n self.sleep(exc.wait_seconds is not None and exc.wait_seconds\n or self.temporary_error_wait_seconds,\n log_reason='temporary error wait')\n else:\n if temporary_error_retries - 1:\n raise self.RetryExceeded(exc, retry_count=temporary_error_retries - 1)\n raise", "def query_retry(self, f, *args, **kwargs):\n\n num_retries = CONF.watcher_datasources.query_max_retries\n timeout = CONF.watcher_datasources.query_timeout\n for i in range(num_retries):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n LOG.exception(e)\n self.query_retry_reset(e)\n LOG.warning(\"Retry {0} of {1} while retrieving metrics retry \"\n \"in {2} seconds\".format(i+1, num_retries, timeout))\n time.sleep(timeout)", "def retry(num=5):\n s = requests.Session()\n retries = Retry(total=num, backoff_factor=0.1,\n status_forcelist=[500, 502, 503, 504])\n 
s.mount('http://', HTTPAdapter(max_retries=retries))\n\n return s", "def retry(initial_delay,\n max_delay,\n factor=2.0,\n jitter=0.25,\n is_retriable=None):\n if factor < 1:\n raise ValueError('factor must be >= 1; was %f' % (factor,))\n\n if jitter >= 1:\n raise ValueError('jitter must be < 1; was %f' % (jitter,))\n\n # Generator to compute the individual delays\n def delays():\n delay = initial_delay\n while delay <= max_delay:\n yield delay * random.uniform(1 - jitter, 1 + jitter)\n delay *= factor\n\n def wrap(fn):\n \"\"\"Wrapper function factory invoked by decorator magic.\"\"\"\n\n def wrapped_fn(*args, **kwargs):\n \"\"\"The actual wrapper function that applies the retry logic.\"\"\"\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)\n\n return wrapped_fn\n\n return wrap" ]
[ "0.790463", "0.7765928", "0.7565526", "0.7538731", "0.75145954", "0.7512866", "0.7507025", "0.7504739", "0.74817264", "0.7449125", "0.7426998", "0.73940426", "0.7380012", "0.73608816", "0.7356993", "0.73083615", "0.7303313", "0.72958124", "0.7114819", "0.70131445", "0.70127916", "0.6995641", "0.6958797", "0.69552225", "0.6945945", "0.6944727", "0.69350576", "0.6929361", "0.6919916", "0.69184095", "0.6892953", "0.6864827", "0.68379664", "0.682513", "0.682451", "0.67784256", "0.67623794", "0.6750118", "0.6728307", "0.66824406", "0.66813165", "0.6624268", "0.66115725", "0.6604032", "0.6587645", "0.65583086", "0.6508103", "0.6500225", "0.649983", "0.649859", "0.64891183", "0.6480209", "0.64746976", "0.6465723", "0.6455848", "0.64531523", "0.643201", "0.6427333", "0.64153236", "0.64153236", "0.6354229", "0.6346307", "0.63445866", "0.6334495", "0.6331983", "0.633044", "0.6321162", "0.6299986", "0.6280067", "0.6272917", "0.6265431", "0.62595975", "0.6242535", "0.62271386", "0.62079006", "0.6200587", "0.61935586", "0.61836755", "0.61769825", "0.6160653", "0.6150492", "0.61249065", "0.61232406", "0.61141413", "0.61074954", "0.6102967", "0.60920084", "0.60817164", "0.6077051", "0.6073543", "0.60666984", "0.6049193", "0.6036813", "0.5993666", "0.5987521", "0.59521985", "0.59496003", "0.59480214", "0.59465754", "0.5940257" ]
0.7604029
2
Test if all the elements of an array are equivalent to a value. If `value` is None, then this function does not do any comparison and returns False.
def all_equal(value: Any, array: Any): if value is None: return False if not value: # if `value` is falsey, then just 1 truthy value in `array` # is sufficient to return False. We assume here that np.any is # optimized to return on the first truthy value in `array`. try: return not np.any(array) except (TypeError, ValueError): # pragma: no cover pass if np.issubdtype(array.dtype, np.object_): # we have to flatten the result of np.equal to handle outputs like # [np.array([True,True]), True, True] return all(flatten(np.equal(value, array, dtype=array.dtype))) else: # Numpy errors if you call np.isnan on custom dtypes, so ensure # we are working with floats before calling isnan if np.issubdtype(array.dtype, np.floating) and np.isnan(value): return np.all(np.isnan(array)) else: # using == raises warnings from numpy deprecated pattern, but # using np.equal() raises type errors for structured dtypes... return np.all(value == array)
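A minimal usage sketch of the `all_equal` helper above (an assumption: NumPy and the function itself are importable from its utility module; the `flatten` helper it calls on the object-dtype path lives in that same module and is not shown here):

import numpy as np

arr = np.full((3, 4), 7)
print(all_equal(7, arr))                 # True: every element matches the value
print(all_equal(0, np.zeros(5)))         # True: falsey value and no truthy element in the array
print(all_equal(None, arr))              # False: None short-circuits without any comparison
print(all_equal(np.nan, np.array([np.nan, np.nan])))  # True: NaN is matched via np.isnan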
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_equal(array):\n if not array:\n raise ValueError(\"Array is empty\")\n\n first_item = array[0]\n\n if any(item != first_item for item in array):\n return False\n\n return True", "def has_equal_values_vec(x):\n return jnp.all(x == x[0])", "def check_array(self, array: ArrayData, value: List[int]):\n assert self._call is not None, f\"You must first call a function before checking its return values!\"\n \"\"\" Checks that when this function is called, we have not already assembled and run the test. \"\"\"\n assert not self._has_executed, f\"Test has already been assembled and run!\"\n assert len(value) > 0, \"Array to compare against has to contain at least one element.\"\n assert len(value) <= len(array), \"Array to compare against must contain a smaller or equal amount of elements.\"\n expected = self.array(value).name\n actual = \"la a2, \" + self._lookup_array(array)\n self._compare_int_array(array.name, actual, expected, value, exit_code = 2)", "def containsValue(self, value):\n for val in values():\n if val == value or val == value:\n return True\n return False", "def value_checker(self, puzzle: List[int], value: int) -> bool:\n if len(puzzle) == 0:\n return False\n\n if len(puzzle) == 1:\n if puzzle[0] is value:\n return True\n else:\n return False\n\n mid = len(puzzle) // 2\n left = self.value_checker(puzzle[:mid], value)\n right = self.value_checker(puzzle[mid:], value)\n\n return left or right", "def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True", "def isAny(self,test):\n for x in np.nditer(self.t, op_flags=['readonly']):\n if op(x):\n return True\n return False", "def edge_case(values):\r\n for val in values:\r\n if val is True:\r\n return False\r\n return True", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def check_all_iterable_values_equal(iterable):\n return all(second_value_onwards == iterable[0] for second_value_onwards in iterable[1:])", "def _has_at_least_one_value(self, i, values):\n for a in values:\n j = self.attributes.index(a)\n v = values[a]\n if self[i][j] == v:\n return True\n return False", "def check_solved(self, values):\n if values == None: #Forward_checking determines that values state is invalid -> set false, check if false here.\n return False\n\n for box in values.keys():\n if len(values[box]) != 1:\n return False\n return True", "def array_equal_to(obj):\n return ArrayIsEqual(obj)", "def check_array(self, v, t):\n raise NotImplementedError('check_array')", "def _has_values(self, i, values):\n for a in values:\n j = self.attributes.index(a)\n v = values[a]\n if self[i][j] != v:\n return False\n return True", "def are_equal(value1, value2):\n if value1 == None or value2 == None:\n return True\n if value1 == None or value2 == None:\n return False\n return value1 == value2", "def __call__(self, value: np.ndarray) -> bool:\n for k, bound in enumerate(self.bounds):\n if bound is not None:\n if np.any((value > bound) if k else (value < bound)):\n return False\n return True", "def any_values(*values):\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield any(v)", "def _all_na_or_values(series, values):\n series_excl_na = series[series.notna()]\n if not len(series_excl_na):\n out = True\n elif series_excl_na.isin(values).all():\n out = True\n else:\n out = False\n return out", "def array_equal_nan(a1, a2):\n try:\n a1, a2 = np.asarray(a1), np.asarray(a2)\n except 
Exception:\n return False\n if a1.shape != a2.shape:\n return False\n # Handling NaN values\n a1nan, a2nan = np.isnan(a1), np.isnan(a2)\n # NaN's occur at different locations\n if not (a1nan == a2nan).all():\n return False\n # Shapes of a1, a2 and masks are guaranteed to be consistent by this point\n return bool(np.asarray(a1[~a1nan] == a2[~a1nan]).all())", "def array_equivalent(left, right, strict_nan=False):\n\n left, right = np.asarray(left), np.asarray(right)\n\n # shape compat\n if left.shape != right.shape:\n return False\n\n # Object arrays can contain None, NaN and NaT.\n # string dtypes must be come to this path for NumPy 1.7.1 compat\n if is_string_dtype(left) or is_string_dtype(right):\n\n if not strict_nan:\n # isnull considers NaN and None to be equivalent.\n return lib.array_equivalent_object(\n _ensure_object(left.ravel()), _ensure_object(right.ravel()))\n\n for left_value, right_value in zip(left, right):\n if left_value is NaT and right_value is not NaT:\n return False\n\n elif isinstance(left_value, float) and np.isnan(left_value):\n if (not isinstance(right_value, float) or\n not np.isnan(right_value)):\n return False\n else:\n if left_value != right_value:\n return False\n return True\n\n # NaNs can occur in float and complex arrays.\n if is_float_dtype(left) or is_complex_dtype(left):\n return ((left == right) | (np.isnan(left) & np.isnan(right))).all()\n\n # numpy will will not allow this type of datetimelike vs integer comparison\n elif is_datetimelike_v_numeric(left, right):\n return False\n\n # M8/m8\n elif needs_i8_conversion(left) and needs_i8_conversion(right):\n if not is_dtype_equal(left.dtype, right.dtype):\n return False\n\n left = left.view('i8')\n right = right.view('i8')\n\n # NaNs cannot occur otherwise.\n try:\n return np.array_equal(left, right)\n except AttributeError:\n # see gh-13388\n #\n # NumPy v1.7.1 has a bug in its array_equal\n # function that prevents it from correctly\n # comparing two arrays with complex dtypes.\n # This bug is corrected in v1.8.0, so remove\n # this try-except block as soon as we stop\n # supporting NumPy versions < 1.8.0\n if not is_dtype_equal(left.dtype, right.dtype):\n return False\n\n left = left.tolist()\n right = right.tolist()\n\n return left == right", "def all_equal(sequence):\n return all(x == sequence[0] for x in sequence)", "def is_integer(value: Union[float, np.ndarray]) -> bool:\n if type(value) == np.ndarray:\n for entry in value:\n result = Comparator.is_integer(entry)\n if not result:\n return False\n return True\n else:\n value = abs(value)\n value -= int(value)\n if value > 0.5:\n return Comparator.is_close_to_zero(1 - value)\n return Comparator.is_close_to_zero(value)", "def __is_valid_value(self, target_row, target_col, value):\n if value == 0:\n return True # 0's are always a valid value since they are a placeholder (signify empty position)\n\n # Check row and column:\n for i in range(9):\n if self.final_values[i][target_col] == value and i != target_row: # Check column\n return False # Value appears on the same column twice\n if self.final_values[target_row][i] == value and i != target_col: # Check row\n return False # Value appears on the same row twice\n\n # Find start of 3x3 block:\n block_row = target_row - (target_row % 3)\n block_col = target_col - (target_col % 3)\n\n # Check each element in the 3x3 block:\n for row in range(3):\n for col in range(3):\n if value == self.final_values[block_row + row][block_col + col] and block_row + row != target_row and block_col + col != target_col:\n 
return False # Value appears in the same block twice\n\n return True # Value does not appear in the same row, col or block", "def _validate_value_type(value: Any, expected: Sequence[Type]) -> bool:\n\n for entry in expected:\n if get_origin(entry) is None:\n if type(value) == entry: # pylint: disable=unidiomatic-typecheck\n return True\n continue\n if _validate_value_type(value, get_args(entry)):\n return True\n return False", "def _confirm_constant(a):\n a = np.asanyarray(a)\n return np.isclose(a, 1.0).all(axis=0).any()", "def arrNotInArrList(arr, arrList):\n a = np.array(arr)\n for item in arrList:\n item = np.array(item)\n if np.array_equiv(item, a):\n return False\n return True", "def all_values(*values):\n print(\"here\")\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield all(v)", "def is_null(value: Any) -> bool:\n return not value", "def is_array(val):\n return (\n isinstance(val, tuple) or \\\n isinstance(val, dict) or \\\n isinstance(val, list)\n )", "def eval_list(self, value):\n\n okay = True\n count = 0\n for v in value.elts:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def all_equal(list_a, list_b):\n if len(list_a) != len(list_b):\n return False\n a, b = np.array(list_a), np.array(list_b)\n return all(a == b)", "def test_values_eq(self):\r\n dtype = self.dtype\r\n if dtype is None:\r\n dtype = theano.config.floatX\r\n\r\n # We need big shape as in the past there have been a bug in the\r\n # sparse values_eq_approx.\r\n shp = (1024,1024)\r\n\r\n #Test the case with all zeros element\r\n rng = numpy.random.RandomState(utt.fetch_seed())\r\n for x in [numpy.asarray(rng.rand(*shp), dtype=dtype),\r\n numpy.zeros(shp, dtype=dtype)]:\r\n zeros = (x==0).all()\r\n x = self.cast_value(x)\r\n x_shared = self.shared_constructor(x, borrow=True)\r\n\r\n y = x.copy()\r\n y[0,0],y[1,0] = y[1,0],y[0,0]\r\n y = self.cast_value(y)\r\n\r\n assert x_shared.type.values_eq(x, x)\r\n assert x_shared.type.values_eq_approx(x, x)\r\n if not zeros:\r\n assert not numpy.allclose(self.ref_fct(x), self.ref_fct(y))\r\n assert not x_shared.type.values_eq(x, y)\r\n assert not x_shared.type.values_eq_approx(x, y)", "def in_array(val, obj):\n return (val in obj)", "def __eq__(self, seq):\n # If seq is different length or not a list, then it is not equal\n if self._length != len(seq) or not isinstance(seq, list):\n return False\n # seq is equal if every element at the same index has equivalent value\n return all(self._arr[i] == seq[i] for i in range(self._length))", "def all(a: list[int], b: int) -> bool:\n i = 0\n if len(a) == 0:\n return False\n else:\n while i < len(a):\n if a[i] == b:\n i += 1\n else:\n return False\n return True", "def linear_search_iterative(array, value):\n for elt in array:\n if compare(elt, value) == 0:\n return True\n\n return False", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def is_arrayexpress_array(val):\n return arrayexpress_array_regexp.match(val)", "def is_close_to_zero(value: Union[float, np.ndarray]) -> Union[bool, np.ndarray]:\n return abs(value) < 1.0e-10", "def all_user(iterable):\n for element in iterable:\n if not element:\n return False\n return True", "def check_array_pointer(self, register: str, value: List[int]):\n assert self._call is not None, f\"You must first call a function before checking its return values!\"\n \"\"\" Checks that when this 
function is called, we have not already assembled and run the test. \"\"\"\n assert not self._has_executed, f\"Test has already been assembled and run!\"\n assert len(value) > 0, \"Array to compare against has to contain at least one element.\"\n saved_register = self._parse_register(register)\n array_name = f\"array pointed to by {register}\"\n expected = self.array(value).name\n actual = f\"mv a2 {saved_register}\"\n self._compare_int_array(array_name, actual, expected, value, exit_code = 2)", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def all(self, key: Callable[[T], bool]=None) -> bool:\n if key is None:\n return all(self.array)\n return all(key(x) for x in self.array)", "def in_array(array1, array2):", "def test_correct_p_values_all_None(self):\r\n exp = [None, None]\r\n obs = self.mc._correct_p_values([None, None])\r\n self.assertEqual(obs, exp)", "def checkArrayAllClose(comment,value,expected,results,tol=1e-11):\n if np.allclose(value,expected,rtol=tol,atol=tol):\n results[\"pass\"] += 1\n else:\n print(\"checking answer\",comment,value,\"!=\",expected)\n results[\"fail\"] += 1", "def solved(values):\n # for box in values.keys():\n # if len(values[box]) != 1:\n # return False\n # return True\n return len([box for box in values.keys() if len(values[box]) != 1]) == 0", "def _values_of_same_type(self, val1, val2):\n if (type(val1) in (np.ndarray, tuple, np.matrix)) and (\n type(val2) is type(val1)\n ):\n return True\n else:\n return super(ArrayParameter, self)._values_of_same_type(val1, val2)", "def row_is_in_array(row, array):\n return any((array[:] == row).all(1))", "def checkElementInArray(element,array):\n\t\n\texists = False\n\t\n\tfor i in array:\n\t\n\t\tif i == element:\n\t\t\texists = True\n\n\treturn exists", "def _all_equal(arg):\n return arg.count(arg[0]) == len(arg)", "def isPossibleAssign(self, position, value):\n\n # Check horizontal\n for i, x in enumerate(self.board[position[0], :]):\n if i != position[1] and x == value:\n return False\n\n # Check vertical\n for i, x in enumerate(self.board[:, position[1]]):\n if i != position[0] and x == value:\n return False\n\n # Check square\n square = [\n self.board[\n (position[0] // 3) * 3 + i,\n (position[1] // 3) * 3 + j\n ]\n for i in range(3) for j in range(3)\n ]\n for i, x in enumerate(square):\n if i != (position[0] % 3 * 3 + position[1] % 3) and x == value:\n return False\n\n return True", "def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n return True", "def values_eq(self, a, b):\r\n return a == b", "def all(iterable):\n for item in iterable:\n if not item:\n return False\n return True", "def _is_equal_to_atom(self, atom):\n\n return (self.type == atom.type and self.shape == atom.shape\n and self.itemsize == atom.itemsize\n and np.all(self.dflt == atom.dflt))", "def allequal(a, b, flavor=\"numpy\"):\n\n # print(\"a-->\", repr(a))\n # print(\"b-->\", repr(b))\n if not hasattr(b, \"shape\"):\n # Scalar case\n return a == b\n\n if ((not hasattr(a, \"shape\") or a.shape == ()) and\n (not hasattr(b, \"shape\") or b.shape == ())):\n return a == b\n\n if a.shape != b.shape:\n if verbose:\n print(\"Shape is not equal:\", a.shape, \"!=\", b.shape)\n return 0\n\n # Way to check the type equality without byteorder considerations\n if hasattr(b, \"dtype\") and a.dtype.str[1:] != b.dtype.str[1:]:\n if verbose:\n print(\"dtype is not equal:\", a.dtype, \"!=\", b.dtype)\n return 0\n\n # Rank-0 case\n if len(a.shape) == 
0:\n if a[()] == b[()]:\n return 1\n else:\n if verbose:\n print(\"Shape is not equal:\", a.shape, \"!=\", b.shape)\n return 0\n\n # null arrays\n if a.size == 0: # len(a) is not correct for generic shapes\n if b.size == 0:\n return 1\n else:\n if verbose:\n print(\"length is not equal\")\n print(\"len(a.data) ==>\", len(a.data))\n print(\"len(b.data) ==>\", len(b.data))\n return 0\n\n # Multidimensional case\n result = (a == b)\n result = np.all(result)\n if not result and verbose:\n print(\"Some of the elements in arrays are not equal\")\n\n return result", "def has_value(cls, value):\n return any(value == item.value for item in cls)", "def has_value(cls, value):\n return any(value == item.value for item in cls)", "def notNone(a):\n return a.count(None) != len(a)", "def any_user(iterable):\n for element in iterable:\n if element:\n return True\n return False", "def any(self, values: pdarray) -> Tuple[Union[pdarray, List[Union[pdarray, Strings]]], pdarray]:\n if values.dtype != bool:\n raise TypeError(\"any is only supported for pdarrays of dtype bool\")\n return self.aggregate(values, \"any\") # type: ignore", "def is_converged(self,a,b):\n return np.array_equal(a,b)", "def _check_equivalent(self, other, name, component, logger, tolerance):\n arr = getattr(self, component)\n other_arr = getattr(other, component)\n if arr is None:\n if other_arr is not None:\n logger.debug(\"%s has no %s component but 'other' does.\", name,\n component.upper())\n return False\n else:\n if tolerance > 0.:\n if not numpy.allclose(other_arr, arr, tolerance, tolerance):\n logger.debug(\"%s %s values are not 'close'.\", name,\n component.upper())\n return False\n else:\n if (other_arr != arr).any():\n logger.debug('%s %s values are not equal.', name,\n component.upper())\n return False\n return True", "def _check_nan_array(array):\n # count nan\n mask = np.isnan(array)\n x = mask.sum()\n\n # check the NaN values of the array\n if x > 0:\n raise ValueError(\"Array has {0} NaN values.\".format(x))", "def any(self, key: Callable[[T], bool]=None) -> bool:\n if key is None:\n return any(self.array)\n return any(key(x) for x in self.array)", "def any(self) -> bool:", "def is_not_constant(series: np.ndarray) -> bool:\n #print(\"enter bartpy/bartpy/data.py is_not_constant\")\n \n if len(series) <= 1:\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return False\n first_value = None\n for i in range(1, len(series)):\n # if not series.mask[i] and series.data[i] != first_value:\n if series[i] != first_value:\n if first_value is None:\n first_value = series.data[i]\n else:\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return True\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return False", "def all(x) -> bool:\n pass", "def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)", "def all(b: list[int], a: int) -> bool:\n i: int = 0\n while i < len(b):\n if b[i] == a:\n if i == len(b) - 1:\n return True\n i += 1\n else:\n return False\n return False", "def __eq__(self, value: object) -> bool:\n if not isinstance(value, _GENERIC_ALIAS_TYPE):\n return NotImplemented\n return (\n self.__origin__ == value.__origin__ and\n self.__args__ == value.__args__ and\n self.__unpacked__ == getattr(\n value, \"__unpacked__\", self.__unpacked__\n )\n )", "def find(self, value):\n checks = set()\n n = len(self.arr)\n\n for i in range(n): # 0...n-1\n el = self.arr[i]\n if value - el in checks:\n # hurray, found a pair\n return True\n checks.add(el)\n return False", "def 
is_none_or_nan(value):\n is_none = value == None\n is_nan = np.isnan(value)\n \n return is_none or is_nan", "def HasArrayOuts(self, function):\n if function.callback:\n for param in function.callback.params:\n if self._IsOrContainsArray(param.type_):\n return True\n return function.returns and self._IsOrContainsArray(function.returns)", "def shares_at_least_one_element_with(self, other_value):\n value = self._to_frozenset(self.value)\n other_value: frozenset = self._to_frozenset(other_value)\n if value & other_value:\n return True\n return False", "def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0", "def __eq__(self, other):\n # NOTE: This is debatable.\n # NOTE: Hopefully, the hash function should behave so well that\n # collisions are rare, therefore making it rare that the expensive\n # equality comparison is called.\n try:\n # numpy.all() works on zero-dimensional arrays.\n return numpy.all(numpy.equal(self.__value, other.__value))\n except ValueError:\n return False\n except AttributeError:\n raise TypeError(\"Not knowing how to compare.\")", "def has_valid_values(self):\n for element, value in self.items():\n if not (0 <= value <= 1):\n return False\n return True", "def uniform_list_check(value_list):\n return reduce((lambda acc, value: acc and value == value_list[0]), value_list, True)", "def is_valid_board(self):\n for (row, col), value in np.ndenumerate(self.final_values): # Iterate through each position\n if not self.__is_valid_value(row, col, value): # Check that the value is valid\n return False # An invalid (duplicate) value was found\n return True", "def __contains__(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] == value:\n return True\n return found", "def values_equal(t : Type, v1, v2) -> bool:\n return compare_values(t, v1, v2) == EQ", "def assertAllNan(self, a):\n is_nan = np.isnan(self._GetNdArray(a))\n all_true = np.ones_like(is_nan, dtype=np.bool)\n self.assertAllEqual(all_true, is_nan)", "def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)", "def is_valid_value(self, value):\n return value in self.values", "def isscalar(x):\n arrayed_x = asarray(x)\n return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'", "def test_equivalent():\n # Positive test\n assert u.equivalent(np.arange(10)*q.um, q.cm)\n\n # Negative units test\n assert not u.equivalent(np.arange(10)*q.um, q.Jy)\n\n # Negative dtype test\n assert not u.equivalent(np.arange(10), q.um)", "def is_identity(self) -> np.ndarray:\n if self.scalar_vector:\n return np.all(np.isclose(self.array, np.tile([1., 0., 0., 0.], (self.array.shape[0], 1))), axis=1)\n return np.all(np.isclose(self.array, np.tile([0., 0., 0., 1.], (self.array.shape[0], 1))), axis=1)", "def has_value(var) :\n return var != None", "def _is_tc_entity_array(self, data):\n for d in data:\n if not self._is_tc_entity(d):\n return False\n return True", "def check_equal_numpy(x, y):\r\n if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):\r\n return (x.dtype == y.dtype and x.shape == y.shape and\r\n numpy.any(abs(x - y) < 1e-10))\r\n elif (isinstance(x, numpy.random.RandomState) and\r\n isinstance(y, numpy.random.RandomState)):\r\n return python_all(numpy.all(a == b) for a, b in\r\n izip(x.__getstate__(), y.__getstate__()))\r\n else:\r\n return x == y", "def _is_equal_to_enumatom(self, enumatom):\n\n return 
(self.enum == enumatom.enum and self.shape == enumatom.shape\n and np.all(self.dflt == enumatom.dflt)\n and self.base == enumatom.base)", "def any(self, values):\n return self.aggregate(values, \"any\")", "def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))", "def check_node_input_value(self, node, input_index: int, expected_value):\n assert len(node.input) > input_index\n\n value = self.model.get_constant_value(node.input[input_index])\n\n if isinstance(expected_value, list):\n return (isinstance(value, (ndarray, list))) and array_equal(expected_value, value, equal_nan=False)\n else:\n return value == expected_value", "def _coincident(a,b):\n return np.array_equal(a, b) or np.array_equal(np.flipud(a),b)", "def __bool__(self) -> bool:\n\n nonzeroes = np.absolute(self.value) > _eps\n\n if nonzeroes.any():\n return True\n else:\n return False", "def __eq__(self, value):\n return self.real != value" ]
[ "0.6699362", "0.6486526", "0.6385027", "0.6280364", "0.61277896", "0.612132", "0.6043027", "0.60365415", "0.5891498", "0.5891476", "0.5875021", "0.5753058", "0.5749066", "0.57388574", "0.5702565", "0.56945324", "0.5661426", "0.56569785", "0.56512713", "0.56494707", "0.56261206", "0.5598292", "0.5581735", "0.55642754", "0.5562079", "0.55550855", "0.5546242", "0.55428535", "0.55401695", "0.5523063", "0.5508727", "0.5505469", "0.55050087", "0.5504531", "0.55005103", "0.5494477", "0.54833883", "0.54824114", "0.54613066", "0.5440889", "0.5427923", "0.54137325", "0.54046875", "0.54033047", "0.5396736", "0.5374754", "0.53629917", "0.53584504", "0.5355467", "0.5351539", "0.5351489", "0.53436583", "0.5338186", "0.5333864", "0.53271055", "0.53251314", "0.5323332", "0.5321825", "0.53069144", "0.53069144", "0.52957714", "0.5282042", "0.525508", "0.5249945", "0.5245621", "0.52430737", "0.52365136", "0.5225127", "0.5224426", "0.5217528", "0.52121896", "0.5209109", "0.52049255", "0.5201128", "0.5199386", "0.5190118", "0.5186671", "0.51851815", "0.5173684", "0.517278", "0.51713336", "0.5170545", "0.5169759", "0.51695675", "0.5160293", "0.5157705", "0.51416725", "0.51385987", "0.5136384", "0.5131302", "0.51257634", "0.5125326", "0.51248384", "0.5123302", "0.5116208", "0.51137817", "0.5111762", "0.5108001", "0.5104292", "0.51004094" ]
0.7799601
0
Convenience function to coerce `buf` to ndarray-like array or bytes. First check if `buf` can be zero-copy converted to a contiguous array. If not, `buf` will be copied to a newly allocated `bytes` object.
def ensure_contiguous_ndarray_or_bytes(buf) -> Union[NDArrayLike, bytes]: try: return ensure_contiguous_ndarray_like(buf) except TypeError: # An error is raised if `buf` couldn't be zero-copy converted return ensure_bytes(buf)
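A minimal usage sketch of `ensure_contiguous_ndarray_or_bytes` above, assuming it and the companion helpers it delegates to (`ensure_contiguous_ndarray_like`, `ensure_bytes`) are importable from the same compat module; the module path itself is not given in this entry and is an assumption:

import numpy as np

arr = np.arange(16, dtype="u1")
out = ensure_contiguous_ndarray_or_bytes(arr)                          # contiguous ndarray: taken by the zero-copy branch
out2 = ensure_contiguous_ndarray_or_bytes(bytearray(b"\x00\x01\x02"))  # buffer-protocol object: same branch
# Anything that raises TypeError during the zero-copy conversion falls back to
# ensure_bytes() and comes back as a newly allocated `bytes` object.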
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buffer_to_bytes(buf):\n if not isinstance(buf, bytes):\n buf = bytes(buf)\n return buf", "def test_array_as_buffer(parser):\n doc = parser.parse(b'''{\n \"d\": [1.2, 2.3, 3.4],\n \"i\": [-1, 2, -3, 4],\n \"u\": [1, 2, 3, 4, 5],\n \"x\": [1, 2, 3, \"not valid\"]\n }''')\n\n memoryview(doc['d'].as_buffer(of_type='d'))\n memoryview(doc['i'].as_buffer(of_type='i'))\n memoryview(doc['u'].as_buffer(of_type='u'))\n\n # Not a valid `of_type`.\n with pytest.raises(ValueError):\n doc['i'].as_buffer(of_type='x')\n\n # Not a valid homogeneous array.\n with pytest.raises(TypeError):\n doc['x'].as_buffer(of_type='u')\n\n # Signed elements should error on cast.\n with pytest.raises(ValueError):\n doc['i'].as_buffer(of_type='u')", "def _to_bytes_or_str_array(result, output_dtype_like=None):\n ret = numpy.asarray(result.tolist())\n dtype = getattr(output_dtype_like, 'dtype', None)\n if dtype is not None:\n return ret.astype(type(dtype)(_get_num_chars(ret)), copy=False)\n return ret", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def to_bytearray(x):\n if isinstance(x, bytearray):\n return x\n else:\n return bytearray(x)", "def tobytes(data, buf_len):\n buf = bytearray(data)\n buffer = (ctypes.c_byte * buf_len).from_buffer(buf)\n return buffer", "def toubytes(data, buf_len):\n buf = bytearray(data)\n buffer = (ctypes.c_ubyte * buf_len).from_buffer(buf)\n return buffer", "def to_bytes_io(b):\n return b.buffer if sys.version_info.major == 3 else b", "def frombuffer(buffer, **kwargs):\n\n return call_origin(numpy.frombuffer, buffer, **kwargs)", "def decode_buffer(buf):\n return buf.getvalue().decode('utf-8')", "def _to_arraylike(data):\n _load_objects()\n if data is None:\n raise ValueError('Cannot convert None data.')\n return None\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.asarray(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data", "def _read_to_buffer(cls, buf, stream):\n # We could read it in one step, but instead we'll read it in chunks to avoid big temporaries.\n # (See below.)\n # buf[:] = stream.read( len(buf) )\n\n # Read data from the stream in chunks\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n buf[chunk_start:chunk_stop] = stream.read( next_chunk_bytes )\n remaining_bytes -= next_chunk_bytes", "def netcdf_compatible_array(arry):\n arry = strip_array_wrappers(arry)\n\n if arry.ndim > 0:\n for _ in range(3):\n if arry.dtype.char != \"O\" or arry.ndim == 0:\n break\n\n if arry.shape[0] == 1:\n arry = np.array(arry[0])\n else:\n arry = np.array(tuple(arry))\n\n if \"S\" in arry.dtype.char:\n return np.char.decode(arry, \"ascii\")\n # TODO: ensure no float16, ...\n return arry", "def get_buf(self, data_type = \"void\"):\n if self.buf is not None:\n return ffi.cast(data_type + \"*\", self.buf)\n else:\n raise RuntimeError(\"Buffer not created.\")", "def _as_numpy(y):\n if y is None:\n return None\n elif isinstance(y, np.ndarray):\n return np.copy(y)\n elif hasattr(y, 'as_matrix'):\n return y.as_matrix()\n elif hasattr(y, 'tolist'):\n return y.tolist()\n elif is_iterable(y):\n return np.asarray([i for i in y]) # might accidentally force object type in 3\n raise TypeError('cannot convert type %s to numpy ndarray' % type(y))", "def _deserialize_byte_array(shape, 
ba, offset):\n ar = ndarray(shape=shape, buffer=ba, offset=offset, dtype=\"float64\",\n order='C')\n return ar.copy()", "def test_array_as_buffer_ndim(parser):\n doc = parser.parse(b'''[[\n [1.0, 2.0],\n [3.0, 4.0]\n ]]''')\n view = memoryview(doc.as_buffer(of_type='d'))\n assert len(view) == 32", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgCertificateChain._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def make_buffer():\n return BytesIO()", "def test_asarraylike_array():\n arr = np.array([1, 2, 3, 4])\n result = util.asarraylike(arr)\n\n assert result is arr", "def load_frombuffer(buf):\n if not isinstance(buf, string_types + tuple([bytes])):\n raise TypeError('buf required to be a string or bytes')\n out_size = mx_uint()\n out_name_size = mx_uint()\n handles = ctypes.POINTER(NDArrayHandle)()\n names = ctypes.POINTER(ctypes.c_char_p)()\n check_call(_LIB.MXNDArrayLoadFromBuffer(buf,\n mx_uint(len(buf)),\n ctypes.byref(out_size),\n ctypes.byref(handles),\n ctypes.byref(out_name_size),\n ctypes.byref(names)))\n if out_name_size.value == 0:\n return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]\n else:\n assert out_name_size.value == out_size.value\n return dict(\n (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))\n for i in range(out_size.value))", "def as_bytes(array_or_image,mimetype='image/png'):\n buf = StringIO()\n fmt = mimetype2format(mimetype)\n im = as_pil(array_or_image).save(buf,fmt)\n return buf.getvalue()", "def test_safe_array_cast(self):\n msg = '^Copying array of size \\(5, 5\\) to convert it in the ' \\\n 'right format$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression._safe_array(self.X.astype(int))\n\n msg = '^Copying array of size \\(3, 5\\) to create a ' \\\n 'C-contiguous version of it$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression._safe_array(self.X[::2])\n\n np.testing.assert_array_equal(self.X,\n PoissonRegression._safe_array(self.X))", "def array(a: any,\n dtype: any = None,\n order: {'C', 'F', 'A', 'K'} = 'K',\n *,\n alignment: int = 16,\n copy: bool = True,\n **kwargs) -> np.ndarray:\n\n # Store reference to the original array\n _a = a\n\n # Get array\n a = np.asarray(_a, dtype=dtype, order=order)\n\n # Check if a new copy is created\n _new = a is not _a\n\n # Get dtype, size and alignment\n dtype = a.dtype\n shape = a.shape\n size = np.prod(shape)\n order = 'C' if a.flags.c_contiguous else 'F'\n alignment = int(alignment)\n\n # Check alignment is compatible\n if alignment % dtype.itemsize:\n raise ValueError(\n f\"{dtype} is not compatible with 'alignment={alignment}'\")\n\n # If new, check alignment and eventually return if already aligned\n if (_new or not copy) and isaligned(a, alignment=alignment):\n return a\n\n # Get max_shift\n max_shift = alignment // dtype.itemsize\n\n # If _new, resize\n if _new:\n # Resize memory\n a.resize(size + max_shift)\n\n # Reference to buffer\n buffer = a\n\n # Return to the orginal size\n a = a[:size]\n\n # Otherwise, get new buffer\n else:\n buffer = np.empty((size + max_shift,), dtype=dtype, order=order)\n\n # Get right shift\n shift = (alignment - (buffer.ctypes.data % alignment)) // dtype.itemsize\n assert (shift <= max_shift)\n\n # Re-align if needed\n buffer = buffer[shift:size + shift]\n\n # Reshape\n buffer = np.reshape(buffer, shape, order=order)\n\n # Check alignment\n assert (isaligned(buffer, 
alignment=alignment))\n\n # Copy if a was provided\n np.copyto(buffer, np.reshape(a, shape, order=order))\n\n # Return buffer\n return buffer", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaCertificate._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def convert_array(blob):\n out = io.BytesIO(blob)\n out.seek(0)\n\n return np.load(out)", "def asanyarray(a, dtype=None, order='C'):\n\n if not use_origin_backend(a):\n # if it is already dpnp.ndarray then same object should be returned\n if isinstance(a, dpnp.ndarray):\n return a\n\n if order != 'C':\n checker_throw_value_error(\"asanyarray\", \"order\", order, 'C')\n\n return array(a, dtype=dtype, order=order)\n\n return call_origin(numpy.asanyarray, a, dtype, order)", "def as_bytearray(self):\n\n if self.index < 7:\n return self.buf + bytearray([self.byte])\n else:\n return self.buf", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgCertificateChainDep._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def test_write_bufferprotocol(ctx):\n data = array('f', [1, 2, 3, 4])\n buff = ctx.buffer(data=data)\n assert buff.read() == data.tobytes()", "def cpointer_to_ndarray(ptr, size, dtype, shape):\n buf = np.core.multiarray.int_asbuffer(\n ctypes.addressof(ptr.contents), np.dtype(dtype).itemsize * size)\n arr = np.ndarray(shape, dtype=dtype, buffer=buf)\n return arr", "def readinto(self, buf: bytearray, nbytes: Optional[int] = None) \\\n -> Optional[int]:\n ...", "def test_to_Bytes(self) -> None:\n self.assertEqual(to_bytes('Hello'),\n bytearray('Hello', 'utf-8'),\n \"Check that to_bytes creates byte array when presented with non byte string.\")", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignatureDepB._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def memcopy(dst, src, offset=0, length=None):\n length = length if length is not None else len(src)\n assert type(dst) == np.ndarray, 'invalid type for \"dst\" in memcopy'\n if type(src) is not np.ndarray:\n if type(src) is str and six.PY3:\n src = src.encode()\n src = np.frombuffer(src, dtype='uint8', count=len(src))\n\n dst[:] = src[offset:offset + length]", "def bdecode_buffer(data):\n\tif isinstance(data, str):\n\t\tdata = data.encode()\n\twith BytesIO(data) as f:\n\t\treturn bdecode(f)", "def asarray(input, dtype=None, order='C'):\n\n if (use_origin_backend(input)):\n return numpy.asarray(input, dtype=dtype, order=order)\n\n return array(input, dtype=dtype, order=order)", "def asarray(\n x1,\n dtype=None,\n copy=False,\n order=\"C\",\n device=None,\n usm_type=None,\n sycl_queue=None,\n):\n dpu.validate_usm_type(usm_type, allow_none=True)\n\n if order is None:\n order = \"C\"\n\n \"\"\"Converts incoming 'x1' object to 'dpnp_array'.\"\"\"\n if isinstance(x1, (list, tuple, range)):\n array_obj = dpt.asarray(\n x1,\n dtype=dtype,\n copy=copy,\n order=order,\n device=device,\n usm_type=usm_type,\n sycl_queue=sycl_queue,\n )\n else:\n if isinstance(x1, dpnp_array):\n x1_obj = x1.get_array()\n else:\n x1_obj = x1\n\n sycl_queue_normalized = dpnp.get_normalized_queue_device(\n x1_obj, device=device, sycl_queue=sycl_queue\n )\n\n array_obj = dpt.asarray(\n x1_obj,\n dtype=dtype,\n copy=copy,\n order=order,\n usm_type=usm_type,\n 
sycl_queue=sycl_queue_normalized,\n )\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)", "def ascii_to_numpy(ascii_diagram, as_bytes=True):\n ascii_diagram = [list(i) for i in ascii_diagram]\n ascii_diagram = np.array(ascii_diagram)\n v_to_bytes = np.vectorize(to_bytes)\n return v_to_bytes(ascii_diagram) if as_bytes else ascii_diagram", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignature._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def data_from_array(array, depth):\n if depth == 8:\n # buffer is big-endian, but its not applicable since we're dealing only with bytes \n return array.tobytes()\n else:\n raise ValueError(f\"Unsupported depth: {depth}\")", "def test_dtype_None(self):\n array = np.array([[0, 1, 2], [2, 1, 0]]).T\n self.assertTrue(to_ndarray(array, None, safe=True).flags.contiguous,\n msg='to_ndarray: Non contiguous arrays are not being consolidated when dtype is None')", "def asarray(obj, itemsize=None, unicode=None, order=None):\n return array(obj, itemsize, copy=False,\n unicode=unicode, order=order)", "def serialize_numpy(self, buff, numpy):\n try:\n pass\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def convert_bytearray(func):\n def wrapped(ber, *args, **kwargs):\n return func(bytearray(ber), *args, **kwargs)\n return wrapped", "def read_buffer1d(fobj, dtype, endian=''):\n\n (npix,) = struct.unpack(endian + 'i', fobj.read(4))\n if dtype == 'float':\n arr = npy.fromfile(file=fobj, dtype=npy.float32, count=npix)\n elif dtype == 'double':\n arr = npy.fromfile(file=fobj, dtype=npy.float64, count=npix)\n else:\n raise CppError('read_buffer1d: do not recogniise dtype = ' + str(dtype))\n return arr", "def ascontiguousarray(a, dtype=None):\n\n if not use_origin_backend(a):\n # we support only c-contiguous arrays for now\n # if type is the same then same object should be returned\n if isinstance(a, dpnp.ndarray) and a.dtype == dtype:\n return a\n\n return array(a, dtype=dtype)\n\n return call_origin(numpy.ascontiguousarray, a, dtype)", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519CertificateDep._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def arrayobj1d(inp: Iterable, copy=False) -> np.ndarray:\n return np.array([None] + list(inp), dtype=object, copy=copy)[1:]", "def bencode_buffer(data):\n\twith BytesIO() as f:\n\t\tbencode(data, f)\n\t\treturn f.getvalue()", "def get_buffer_from_arr(np_arr):\n\n pointer_address = get_array_address(np_arr)\n pointer = native_ops.pointerForAddress(pointer_address)\n size = np_arr.size\n if np_arr.dtype == 'float64':\n as_double = DoublePointer(pointer)\n return Nd4jBuffer(nd4j.createBuffer(as_double, size))\n elif np_arr.dtype == 'float32':\n as_float = FloatPointer(pointer)\n return Nd4jBuffer(nd4j.createBuffer(as_float, size))\n elif np_arr.dtype == 'int64':\n as_int = IntPointer(pointer)\n return Nd4jBuffer(data_buffer=nd4j.createBuffer(as_int, size),\n numpy_pointer=_get_numpy_buffer_reference(np_arr))", "def pyarrow_array(arr, nan_to_null=False):\n import numpy as np\n import pyarrow as pa\n if nan_to_null and issubclass(arr.dtype.type,\n (np.floating, np.complexfloating)):\n isnan = np.isnan(arr)\n if isnan.any():\n pa_nul = pa.py_buffer(get_bitmap(isnan))\n return 
pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [pa_nul, pa.py_buffer(arr)])\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [None, pa.py_buffer(arr)])", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignatureDepA._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def read_image_as_numpy(bytes_obj: Optional[bytes]=None) ->Optional[torch.Tensor]:\n try:\n with BytesIO(bytes_obj) as buffer:\n image = np.load(buffer)\n return torch.from_numpy(image)\n except Exception as e:\n warnings.warn(f'Failed to read image from numpy file. Original exception: {e}')\n return None", "def _asarray(a, dtype, order=None):\r\n if str(dtype) == 'floatX':\r\n dtype = theano.config.floatX\r\n dtype = numpy.dtype(dtype) # Convert into dtype object.\r\n rval = numpy.asarray(a, dtype=dtype, order=order)\r\n # Note that dtype comparison must be done by comparing their `num`\r\n # attribute. One cannot assume that two identical data types are pointers\r\n # towards the same object (e.g. under Windows this appears not to be the\r\n # case).\r\n if rval.dtype.num != dtype.num:\r\n # Type mismatch between the data type we asked for, and the one\r\n # returned by numpy.asarray.\r\n # If both types have the same string description (byte order, basic\r\n # type, and number of bytes), then it is safe to return a view.\r\n if (dtype.str == rval.dtype.str):\r\n # Silent fix.\r\n return rval.view(dtype=dtype)\r\n else:\r\n # Unexpected mismatch: better know what is going on!\r\n raise TypeError('numpy.array did not return the data type we '\r\n 'asked for (%s %s #%s), instead it returned type '\r\n '%s %s #%s: function '\r\n 'theano._asarray may need to be modified to handle this '\r\n 'data type.' %\r\n (dtype, dtype.str, dtype.num, rval.dtype, rval.str, rval.dtype.num))\r\n else:\r\n return rval", "def as_buffer(\n cls,\n obj: torch.Tensor,\n counts: Tuple[int] = None,\n displs: Tuple[int] = None,\n is_contiguous: Optional[bool] = None,\n ) -> List[Union[MPI.memory, Tuple[int, int], MPI.Datatype]]:\n squ = False\n if not obj.is_contiguous() and obj.ndim == 1:\n # this makes the math work below this function.\n obj.unsqueeze_(-1)\n squ = True\n\n mpi_type, elements = cls.mpi_type_and_elements_of(obj, counts, displs, is_contiguous)\n mpi_mem = cls.as_mpi_memory(obj)\n if squ:\n # the squeeze happens in the mpi_type_and_elements_of function in the case of a\n # non-contiguous 1D tensor. 
Squeezing it puts the memory back to where it should be\n obj.squeeze_(-1)\n return [mpi_mem, elements, mpi_type]", "def frombuffer(self, slice_data):\n return NotImplemented", "def encode_byte_array(value: bytes) -> bytes:\n return bytes([]) if isinstance(value, type(None)) else value", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519SignatureDepB._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def to_numpy(x: Union[torch.Tensor, np.ndarray, Any, None]) -> Union[np.ndarray, None]:\n if x is None:\n return None\n elif torch.is_tensor(x):\n return x.data.cpu().numpy()\n elif isinstance(x, np.ndarray):\n return x\n elif isinstance(x, (Iterable, int, float)):\n return np.array(x)\n else:\n raise ValueError(\"Unsupported type\")", "def fig2buf(fig):\n # draw the renderer\n fig.canvas.draw()\n\n # Get the RGBA buffer from the figure\n w,h = fig.canvas.get_width_height()\n buf = np.fromstring ( fig.canvas.tostring_argb(), dtype=np.uint8 )\n buf.shape = (h, w, 4)\n \n # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode\n buf = np.roll(buf, 3, axis = 2 )\n buf = buf[0::1,0::1] #slice to make image 4x smaller and use only the R channel of RGBA\n buf = buf[0::1,0::1, 0:3] #slice to make image 4x smaller and use only the R channel of RGBA\n return buf", "def toubyte(data):\n buf = bytearray(data)\n buffer = (ctypes.c_ubyte).from_buffer(buf)\n return buffer", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519SignatureDepA._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def contiguous( cls, source, typeCode=None ):\n typeCode = GL_TYPE_TO_ARRAY_MAPPING[ typeCode ]\n try:\n contiguous = source.flags.contiguous\n except AttributeError:\n if typeCode:\n return numpy.ascontiguousarray( source, typeCode )\n else:\n return numpy.ascontiguousarray( source )\n else:\n if contiguous and (typeCode is None or typeCode==source.dtype.char):\n return source\n elif (contiguous and cls.ERROR_ON_COPY):\n from OpenGL import error\n raise error.CopyError(\n \"\"\"Array of type %r passed, required array of type %r\"\"\",\n source.dtype.char, typeCode,\n )\n else:\n # We have to do astype to avoid errors about unsafe conversions\n # XXX Confirm that this will *always* create a new contiguous array\n # XXX Guard against wacky conversion types like uint to float, where\n # we really don't want to have the C-level conversion occur.\n # XXX ascontiguousarray is apparently now available in numpy!\n if cls.ERROR_ON_COPY:\n from OpenGL import error\n raise error.CopyError(\n \"\"\"Non-contiguous array passed\"\"\",\n source,\n )\n if typeCode is None:\n typeCode = source.dtype.char\n return numpy.ascontiguousarray( source, typeCode )", "def asarray_cpwarn(a, dtype=None, message='warning', comment=''):\n a_new = asarray(a, dtype)\n # must drop numpy's order argument since it conflicts\n # with Numeric's savespace\n\n # did we copy?\n if a_new is not a:\n # we do not return the identical array, i.e., copy has taken place\n msg = '%s copy of array %s, from %s to %s' % \\\n (comment, a.shape, type(a), type(a_new))\n if message == 'warning':\n print('Warning: %s' % msg)\n elif message == 'exception':\n raise TypeError(msg)\n return a_new", "def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if 
isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs", "def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs", "def assertEqualBufferValue(self, buf, val):\n bufferValue = buf\n if isinstance(val, str):\n bufferValue = bufferValue.decode(\"utf-8\")\n\n if isinstance(bufferValue, list):\n if isinstance(val[0], str):\n bufferValue = [b.decode(\"utf8\") for b in bufferValue]\n self.assertEqual(bufferValue, val)", "def readinto(self, buf: bytearray, nack: bool = True) -> None:\n ...", "def safe_numpy_to_native(num):\n try:\n return num.item()\n except:\n return num", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def _asarray(v):\n try:\n return np.asarray(v)\n except ValueError:\n return np.asarray(v, dtype=object)", "def fig2array(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n buf.shape = (w, h, 3)\n return buf", "def _asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()", "def recv_array(socket, flags=0, copy=True, track=False):\n md = socket.recv_json(flags=flags)\n msg = socket.recv(flags=flags, copy=copy, track=track)\n buf = buffer(msg)\n A = np.frombuffer(buf, dtype=md['dtype'])\n return A.reshape(md['shape'])", "def __send_like(\n self, func: Callable, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int\n ) -> Tuple[Optional[Union[DNDarray, torch.Tensor]]]:\n if isinstance(buf, DNDarray):\n buf = buf.larray\n if not isinstance(buf, torch.Tensor):\n return func(buf, dest, tag), None\n\n # in case of GPUs, the memory has to be copied to host memory if CUDA-aware MPI is not supported\n sbuf = buf if CUDA_AWARE_MPI else buf.cpu()\n return func(self.as_buffer(sbuf), dest, tag), sbuf", "def arg2array(arg):\n if isinstance(arg, (matrix, ndarray)):\n s = arg.shape\n if len(s) == 1:\n return array(arg)\n if min(s) == 1:\n return array(arg).flatten()\n \n elif isinstance(arg, list):\n return array(arg)\n \n elif isinstance(arg, (int, float, float32, float64)):\n return array([arg])\n \n raise ValueError", "def buffer(self) -> np.ndarray:\n return np.array(self._image_data, copy=False)", "def object_to_bytes(obj):\n if isinstance(obj, str):\n return bytearray(obj, \"UTF-8\")\n elif isinstance(obj, bool):\n return bytearray()\n elif isinstance(obj, int):\n return pack(\"<L\", obj)\n elif obj == None:\n return bytearray()\n elif isinstance(obj, bytearray):\n return obj\n else:\n #print type(obj), obj\n return obj.get_raw()", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def from_buffer(self, buf):\n with self.lock:\n # if we're on python3, convert buf to bytes\n # otherwise this string is passed as 
wchar*\n # which is not what libmagic expects\n if type(buf) == str and str != bytes:\n buf = buf.encode('utf-8', errors='replace')\n return magic_buffer(self.cookie, buf)", "def return_bytearray(func):\n def wrapped(*args, **kwargs):\n return bytearray(func(*args, **kwargs))\n return wrapped", "def check_numpy(x):\n if isinstance(x, torch.Tensor):\n x = x.detach().cpu().numpy()\n x = np.asarray(x)\n assert isinstance(x, np.ndarray)\n return x", "def check_type(a, b):\n\n if isinstance(a, np.ndarray):\n a = np.array(a, dtype=\"uint8\")\n if isinstance(b, np.ndarray):\n b = np.array(b, dtype=\"uint8\")\n\n if a.dtype != \"uint8\":\n a = a.astype(\"uint8\")\n\n if b.dtype != \"uint8\":\n b = b.astype(\"uint8\")\n\n return a, b", "def buffer_data_numpy(self) -> np.ndarray:\n # mask the last 4 bytes to reduce pixel format to mono/color mode and bit width info\n pixel_format = self.data.pixelFormat & 0xFFFF0000\n try:\n arr_dtype, arr_channels = PIXELFORMAT_TO_DTYPE_CHANNELS[pixel_format]\n except KeyError as ex:\n raise NotImplementedError('Pixel format not supported!') from ex\n\n arr_shape = (self.data.height, self.data.width, arr_channels) if arr_channels > 1 \\\n else (self.data.height, self.data.width)\n\n return np.ndarray(buffer=self.buffer_data(),\n dtype=arr_dtype,\n shape=arr_shape)", "def enforce_dtype(arr, dtype, msg=\"\"):\n if isinstance(arr, np.ndarray):\n if arr.dtype is not np.dtype(dtype):\n log_debug(\"enforcing dtype for array %s %s\" % (str(arr.dtype), msg))\n return np.array(arr, dtype)\n return arr", "def _asarray(datapointer, shape):\n if isinstance(shape, tuple):\n size = 1\n for item in shape:\n size *= item\n else:\n size = shape\n\n\n # Get the datatype\n T = ffi.getctype(ffi.typeof(datapointer).item)\n if T not in ctypes2nptypes:\n raise RuntimeError(\"Cannot create an array for element type: {}\".format(T))\n\n # Wrap the buffer in a numpy array\n buffer = ffi.buffer(datapointer, size * ffi.sizeof(T))\n return np.frombuffer(buffer, dtype=ctypes2nptypes[T]).reshape(shape)", "def _check_bytes_type(s):\n\n if (not isinstance(s, bytes)) and (not isinstance(s, bytearray)):\n msg = \"expected bytes-like object, not %s\" % s.__class__.__name__\n raise TypeError(msg)", "def Read(buf: IO[bytes]) -> Optional[bytes]:\n count_bytes = buf.read(_UINT64.size)\n if not count_bytes:\n return None\n\n try:\n (count,) = _UINT64.unpack(count_bytes)\n except struct.error as error:\n raise ValueError(f\"Incorrect size tag {count_bytes}: {error}\")\n\n # It might happen that we are given file with incorrect format. If the size\n # tag is interpreted as a huge number, reading the buffer will lead to raising\n # an exception, because Python will try to allocate a buffer to read into. 
If\n # possible, we try to check guard against such situations and provide more\n # informative exception message.\n\n def Error(left: int) -> ValueError:\n message = f\"Malformed input (reading {count} bytes out of {left} available)\"\n return ValueError(message)\n\n if buf.seekable():\n position = buf.tell()\n\n buf.seek(0, os.SEEK_END)\n size = buf.tell()\n\n if count > size - position:\n raise Error(size - position)\n\n buf.seek(position, os.SEEK_SET)\n\n chunk = buf.read(count)\n if len(chunk) != count:\n raise Error(len(chunk))\n\n return chunk", "def assure_numpy(a: Union[tf.Tensor, np.ndarray]) -> np.ndarray:\n if isinstance(a, np.ndarray):\n return a\n return a.numpy()", "def _tobuffer(self, object_):\n\n raise NotImplementedError", "def test02(self):\n a = np.array([], dtype=\"f8\")\n b = bcolz.fromiter(iter(a), dtype='f8', count=-1)\n assert_array_equal(b[:], a, \"fromiter does not work correctly\")", "def is_arraylike(obj):\n if isinstance(obj, list):\n return True\n elif isinstance(obj, np.ndarray):\n return True\n elif isinstance(obj, pd.Series):\n return True\n elif isinstance(obj, pd.DataFrame):\n return True\n return False", "def readinto(self, buf: bytes, /) -> Optional[int]:", "def decode_byte_array(as_bytes: typing.List[int]) -> bytes:\n return bytes(as_bytes)", "def readinto(self, buf: Any, nbytes: int=-1) -> int:\n ...", "def _serialize_buffer(buffer, array_serialization=None):\n if array_serialization == 'binary':\n # WARNING: in NumPy 1.9, tostring() has been renamed to tobytes()\n # but tostring() is still here for now for backward compatibility.\n return buffer.ravel().tostring()\n elif array_serialization == 'base64':\n return {'storage_type': 'base64',\n 'buffer': base64.b64encode(buffer).decode('ascii')\n }\n raise ValueError(\"The array serialization method should be 'binary' or \"\n \"'base64'.\")", "def test_to_byte_array(self):\n with Image.open(self.subject) as im:\n image = im.convert(\"RGB\")\n\n byte_array = image_helper.to_byte_array(image)\n\n self.assertGreater(len(byte_array), 0)", "def check_array_2D(X, coerce_to_numpy=True):\n X = check_is_numpy_or_pd(X)\n if X.ndim != 2:\n raise ValueError(\n \"If passed as a np.array, X must be a 2-dimensional \"\n \"array, but found shape: {}\".format(X.shape)\n )\n if X.size == 0:\n raise ValueError(\n \"Input is empty or have a dimension of size 0\"\n \", found shape: {}\".format(X.shape)\n )\n if isinstance(X, pd.DataFrame):\n if coerce_to_numpy:\n X = X.values\n return X", "def readinto(self, buf: bytes, nbytes: int, /) -> Optional[int]:" ]
[ "0.6886174", "0.6035865", "0.5953554", "0.55916303", "0.5561849", "0.54923177", "0.5430941", "0.53742135", "0.51953775", "0.5164276", "0.51263016", "0.5078041", "0.507584", "0.5072293", "0.50700045", "0.5063474", "0.50496", "0.5039605", "0.5037979", "0.50145006", "0.49787346", "0.49525878", "0.49493375", "0.49403405", "0.49342653", "0.49341303", "0.4930725", "0.49249688", "0.4923769", "0.49161765", "0.48887137", "0.48802826", "0.487455", "0.48320135", "0.48291564", "0.48275742", "0.4826898", "0.48048025", "0.4802659", "0.47903076", "0.4786776", "0.47657767", "0.47649512", "0.47632957", "0.47551188", "0.47417164", "0.47396472", "0.47346118", "0.47320676", "0.47217676", "0.47153416", "0.47058588", "0.47049487", "0.4704457", "0.47017482", "0.46820232", "0.466183", "0.46571684", "0.4656897", "0.46543813", "0.46478003", "0.46350425", "0.46177986", "0.46169224", "0.46111938", "0.46091095", "0.46091095", "0.46090248", "0.46074924", "0.45981303", "0.4596353", "0.4595845", "0.45934615", "0.45927036", "0.45860237", "0.45753315", "0.45741627", "0.4573722", "0.45726845", "0.45689794", "0.45638093", "0.45472452", "0.45417216", "0.45416197", "0.45383525", "0.45364517", "0.45293728", "0.4525528", "0.4508878", "0.45047906", "0.45023668", "0.45017916", "0.44916102", "0.44901073", "0.44898325", "0.44802412", "0.44763917", "0.44762683", "0.44727007", "0.44673714" ]
0.8558664
0
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion Matrix', cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        cm = np.around(cm, decimals=2)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_confusion_matrix(cm, classes=[0,1], normalize=False, title='Confusion matrix', print_matrix=False):\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n if print_matrix:\n print(cm)", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',saveas='cm', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n\n plt.figure() \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n foo_fig = plt.gcf() # 'get current figure'\n# foo_fig.savefig('confusion_matrix.eps', format='eps', dpi=1000) \n foo_fig.savefig(saveas, dpi=1000, bbox_inches='tight')\n plt.show()", "def plot_confusion_matrix(cm, y_test, y_pred, class_names,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('\\n')\n print(\"Normalized confusion matrix\")\n else:\n print('\\n')\n print('Confusion matrix, without normalization')\n print_cm(cm, class_names)\n text_labels = [['True Negative', 'False Positive'],\n ['False Negative', 'True Positive']]\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i - 0.1, format(cm[i, j], fmt),\n verticalalignment='bottom',\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.text(j, i + 0.1, text_labels[i][j],\n verticalalignment='top',\n horizontalalignment=\"center\",\n fontsize=12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n # Print accuracy and precision\n print('Accuracy: ', accuracy_score(y_test, y_pred, normalize=True))\n print('Precision: ', precision_score(y_test, y_pred, average='macro'))\n print('Roc-Auc: ', roc_auc_score(y_test, y_pred))\n # Plot non-normalized confusion matrix", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n #cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = cm.astype('float') / np.sum(cm.ravel())\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig=plt.figure\n plt.imshow(cm, interpolation='nearest', cmap=cmap )\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in 
itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n return fig", "def plot_confusion_matrix(\n y_true, y_pred, classes, normalize=True, title=\"Confusion matrix\", cmap=plt.cm.Blues\n):\n cm = confusion_matrix(y_true, y_pred)\n\n if normalize:\n cm = cm.astype(\"float\") / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(\"Confusion matrix, without normalization\")\n\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \".2f\" if normalize else \"d\"\n thresh = cm.max() / 2.0\n for i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n #else:\n\n #print('Confusion matrix, without normalization')\n\n# print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n# plt.text(j, i, format(cm[i, j], fmt),\n# horizontalalignment=\"center\",\n# color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n cm = confusion_matrix(y_test,predictions)\n plt.figure()\n plot_confusion_matrix(cm, classes=[0,1,2], normalize=True,\n title='Confusion Matrix')", "def plot_confusion_matrix(y_test, y_pred, classes,\n normalize=True,\n title='Average accuracy \\n',\n cmap=plt.cm.Blues, verbose = 0, precision = 0):\n from sklearn.metrics import confusion_matrix\n import itertools\n \n cm = confusion_matrix(y_test, y_pred)\n accuracy = (np.sum(np.diag(cm)) / np.sum(cm)) * 100.0\n\n if normalize:\n cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]) * 100.0\n if verbose == 1:\n print(\"Normalized confusion matrix\")\n else:\n if verbose 
== 1:\n print('Confusion matrix, without normalization')\n \n if verbose == 1:\n print(cm)\n\n plt.figure(figsize=(18, 9))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.format_map({'acc':accuracy}), fontsize=25)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45, fontsize=20)\n plt.yticks(tick_marks, classes, fontsize=20)\n\n fmt = '{:.'+ '%d'%(precision) +'f} %' if normalize else '{:d}'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, fmt.format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\", fontsize=16)\n plt.tight_layout()\n plt.ylabel('True label', fontsize=20)\n plt.xlabel('Predicted label', fontsize=20)", "def plot_confusion_matrix(self, cm, classes, normalize, cmap=plt.cm.Blues, title='confusin Matrix'):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n tick_marks = np.arange(len(classes))\r\n\r\n self.subplt.set_xlabel(\"Predicted label\")\r\n self.subplt.set_ylabel(\"True Label\")\r\n self.subplt.set_title(\"Confusion Matrix\")\r\n self.subplt.set_xticks(tick_marks,classes)\r\n self.subplt.set_yticks(tick_marks,classes)\r\n\r\n self.canvas2.show()", "def showConfusionMatrix(self): \r\n sn.heatmap(self.conf_matrix, annot=True)\r\n plt.plot( label=\"Accuracy\")\r\n plt.plot( label=\"Error\")\r\n plt.figtext(0,0,'Accuracy: {}\\nError: {}\\nRecall: {}\\nPrecision: {}'.format(self.accuracy,\r\n self.error,\r\n self.recall,\r\n self.precision))\r\n plt.title('Confusion Matrix')\r\n plt.show()\r\n return None", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n \n \n plt.title(title)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.colorbar()\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, 
np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, \"%.2f\" % cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment='center',\n color='white' if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n #based on http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n cmap=plt.cm.Blues\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n np.set_printoptions(precision=2)\n \n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, '%1.2f' % cm[i, j],\n horizontalalignment=\"center\",\n fontsize =12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n #plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes)) \n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 
2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.axis('auto')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n print('Confusion matrix')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(confusion_matrix, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n confusion_matrix = confusion_matrix.astype(\n 'float') / confusion_matrix.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(confusion_matrix)\n\n thresh = confusion_matrix.max() / 2.\n for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):\n plt.text(j, i, confusion_matrix[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if confusion_matrix[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n # print(cm)\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n confusion_matrix_dir = './confusion_matrix_plots'\n if not os.path.exists(confusion_matrix_dir):\n os.mkdir(confusion_matrix_dir)\n\n plt.cla()\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), 
range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"#BFD1D4\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n if normalize:\n plt.savefig(os.path.join(confusion_matrix_dir, 'normalized.jpg'))\n else:\n plt.savefig(os.path.join(confusion_matrix_dir, 'without_normalization.jpg'))", "def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):\n\n plt.figure(figsize=(10,10))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = np.around(cm, decimals=2)\n cm[np.isnan(cm)] = 0.0\n print(\"Normalized confusion matrix\")\n\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n t = \"(%.2f)\"%(cm[i, j])\n #print t\n# plt.text(j, i, t,\n# horizontalalignment=\"center\",\n# color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('IOB-Confusion-Matrix-SVM.png')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n # 1. find out how many samples per class have received their correct label\n # 计算真正类别为k的样本被预测成各个类别的比例\n # e.g. 有25个样本的 true label 是 6,其中10个样本被预测为类别7,那么在混淆矩阵中 true label = 6 并且 predicted label = 7 的一个格子中的值为 0.4\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n # 2. get the precision (fraction of class-k predictions that have ground truth label k)\n # 计算预测的准确率\n # e.g. 
预测为类别k的有12个,但其中只有9个的真正类别是k,那么准确率为 0.75\n # cm = cm.astype('float') / cm.sum(axis=0)[:, np.newaxis]\n \n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n # tick_marks = np.arange(len(classes))\n # plt.xticks(tick_marks, classes, rotation=45)\n # plt.yticks(tick_marks, classes)\n\n # fmt = '.2f' if normalize else 'd'\n # thresh = cm.max() / 2.\n # for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n # plt.text(j, i, format(cm[i, j], fmt),\n # horizontalalignment=\"center\",\n # color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')", "def plotConfusionMatrix(self, cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig('confusion_matrix.png')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion Matrix', cmap=plt.cm.Blues):\n if 
normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion Matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, weight='bold')\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if i == 0:\n plt.text(j-0.1, i+0.3, format(cm[i, j], fmt), color=\"white\" if cm[i, j] > thresh else \"black\")\n if i == 1:\n plt.text(j-0.1, i-0.2, format(cm[i, j], fmt), color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True Label', weight='bold')\n plt.xlabel('Predicted Label', weight='bold')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n #pdb.set_trace()\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def sam_plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n plots_dims = itertools.product(list(range(cm.shape[0])),\n list(range(cm.shape[1])))\n for i, j in plots_dims:\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, 
title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \n print(a)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0.0, vmax=1.0)\n\n plt.title(title)\n\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.3f'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n # plt.title(title)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n # Tweak spacing to prevent clipping of tick-labels\n plt.subplots_adjust(bottom=0.2)", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.xlabel('Predicted label') \n plt.ylabel('True label') \n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, 
interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label') \n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Purples):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n # plt.grid('off')\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=True,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n# print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n return plt.gcf()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title + \"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(title + ' confusion matrix')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if 
normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=None):\n if normalize:\n # cm = cm.T\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # cm = cm.T\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure(figsize=(4, 4))\n plt.imshow(cm, interpolation='nearest', cmap=cmap or plt.cm.Blues)\n plt.title(('Normalized ' if normalize else '') + title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(list(range(cm.shape[0])), list(range(cm.shape[1]))):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n else:\n 1#print('Confusion matrix, without normalization')\n\n #print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, fontsize=14)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title,fontsize=20)\n# plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, fontsize=15)\n plt.yticks(tick_marks, classes,rotation=30,fontsize=15)\n\n fmt = '.2f'\n 
thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",fontsize=20)\n\n plt.tight_layout()\n plt.ylabel('True label',fontsize=20)\n plt.xlabel('Predicted label',fontsize=20)", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Confusion matrix\")\n else:\n print('Confusion matrix')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Greens):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, '%.02f'%cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else 
\"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"red\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n 
print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n 
horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.split('/')[-1])\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n if title:\n plt.savefig(title+'.png')\n\n plt.close()", "def plot_confusion_matrix(cm, classes=[],\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.figure()\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.1f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n plt.savefig('Logistik.png')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues, file_name='cm_plot'):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"font.size\"] = FONT_SIZE\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n fmt = '.6f' if 
normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label', fontsize=FONT_SIZE)\n plt.xlabel('Predicted label', fontsize=FONT_SIZE)\n plt.subplots_adjust(bottom=0.13)\n with PdfPages(file_name) as pdf:\n pdf.savefig()\n plt.close()", "def plot_confusion_matrix(self):\r\n interp = ClassificationInterpretation.from_learner(self.learn)\r\n interp.plot_confusion_matrix()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, 
classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), 
range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > 
thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n# print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes=None, normalize=False,\n title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n\n if classes:\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()", "def plot_confusion_matrix(cm, classes, 
normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots(figsize=(8, 8))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes)\n ax.set_title(title,size = 20)\n ax.set_ylabel('True label',size = 20)\n ax.set_xlabel('Predicted label',size = 20)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\",size = 18)\n plt.setp(ax.get_yticklabels(),size = 18)\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n name = OUTFOLDER + \"/confusion_matrix_batch%d_layers%d_epochs%d_f1%d\" % (BATCH_SIZE,LAYERS,EPOCHS,f1_mean_test*100)\n if normalize:\n name = name + \"_norm\"\n plt.savefig(name)\n plt.close()\n return ax", "def plot_confusion_matrix(cm, classes,\n\t\t\t\t\t\t normalize=False,\n\t\t\t\t\t\t title='Confusion matrix',\n\t\t\t\t\t\t cmap=plt.cm.Blues):\n\tif normalize:\n\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\tprint(\"Normalized confusion matrix\")\n\telse:\n\t\tprint('Confusion matrix, without normalization')\n\n\tprint(cm)\n\n\tplt.imshow(cm, interpolation='nearest', cmap=cmap)\n\tplt.title(title)\n\tplt.colorbar()\n\ttick_marks = np.arange(len(classes))\n\tplt.xticks(tick_marks, classes, rotation=45)\n\tplt.yticks(tick_marks, classes)\n\n\tfmt = '.2f' if normalize else 'd'\n\tthresh = cm.max() / 2.\n\tfor i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n\t\tplt.text(j, i, format(cm[i, j], fmt),\n\t\t\t\t horizontalalignment=\"center\",\n\t\t\t\t color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\tplt.tight_layout()\n\tplt.ylabel('True label')\n\tplt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('../results/conf_matr.png')\n\n return cm", "def plot_confusion_matrix(cm, classes,\n\t\t\t\t\t\t\t normalize=False,\n\t\t\t\t\t\t\t title='Confusion matrix',\n\t\t\t\t\t\t\t cmap=plt.cm.Blues):\n\t\tif normalize:\n\t\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\t\tprint(\"Normalized confusion matrix\")\n\t\telse:\n\t\t\tprint('Confusion matrix, without normalization')\n\n\t\tplt.imshow(cm, interpolation='nearest', cmap=cmap)\n\t\tplt.title(title)\n\t\tplt.colorbar()\n\t\ttick_marks = np.arange(len(classes))\n\t\tplt.xticks(tick_marks, classes, rotation=45)\n\t\tplt.yticks(tick_marks, classes)\n\n\t\tplt.tight_layout()\n\t\tplt.ylabel('True label')\n\t\tplt.xlabel('Predicted label')", "def plot(self):\n plt.imshow(self.cm, interpolation='nearest', cmap=self.cmap)\n plt.title(self.title)\n plt.colorbar()\n tick_marks = 
np.arange(len(self.classes))\n plt.xticks(tick_marks, self.classes, rotation=45)\n plt.yticks(tick_marks, self.classes)\n \n if self.normalize:\n self.cm = self.cm.astype('float') / self.cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(self.cm)\n \n thresh = self.cm.max() / 2.\n for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):\n plt.text(j, i, self.cm[i, j], horizontalalignment=\"center\", color=\"white\" if self.cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True Label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, 
interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n 
plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()" ]
[ "0.81925154", "0.8093376", "0.8027219", "0.80169857", "0.7991475", "0.7988959", "0.79785", "0.7973596", "0.7959107", "0.7956459", "0.793519", "0.79322356", "0.79320693", "0.79289055", "0.79240024", "0.79221827", "0.792161", "0.79209834", "0.7920566", "0.7919846", "0.791543", "0.79137236", "0.790618", "0.79049104", "0.79044825", "0.79025066", "0.7898116", "0.7898004", "0.789432", "0.7892489", "0.7891435", "0.78889793", "0.7888174", "0.78870565", "0.7884266", "0.78821903", "0.78817356", "0.7874553", "0.7872119", "0.78674823", "0.78578603", "0.78547573", "0.78544545", "0.78507525", "0.7850049", "0.78468275", "0.78456753", "0.7845161", "0.7843197", "0.7841122", "0.7841122", "0.7841122", "0.7841122", "0.7841122", "0.7841122", "0.7841122", "0.7841122", "0.7840373", "0.7834588", "0.7834565", "0.7834464", "0.78332764", "0.78299284", "0.7829193", "0.7827706", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.78264207", "0.7820171", "0.78197736", "0.78185153", "0.78185153", "0.7817152", "0.7814989", "0.78134257", "0.7813374", "0.78130394", "0.7812085", "0.7812085", "0.7812085", "0.7812085", "0.7812085", "0.7812085", "0.7812085", "0.7812085", "0.78114337" ]
0.0
-1
Decompose transformation matrix into parts
def trans_matrix_inv(m:numpy.ndarray):
    was2d = False
    if m.shape[1] == 3:
        was2d = True
        m = numpy.asarray([
            [1.0, 0.0, 0.0, 0.0],
            [0.0, m[0,0], m[0,1], m[0,2]],
            [0.0, m[1,0], m[1,1], m[1,2]],
            [0.0, 0.0, 0.0, 1.0]], numpy.float64)
    trans = m[0:3,3]
    rotate = numpy.zeros(3, numpy.float64)
    r = m[0:3,0:3]
    rc = numpy.linalg.cholesky(numpy.matmul(r.T, r)).T
    scale = numpy.diagonal(rc)
    if numpy.linalg.det(r) < 0.0:
        scale[0] *= -1.0
    rcd = rc * numpy.eye(3, dtype=numpy.float64)
    rc = numpy.linalg.solve(rcd, rc)
    shear = numpy.asarray([rc[0,1], rc[0,2], rc[1,2]], numpy.float64)
    r0 = trans_matrix({'rotate': rotate, 'scale': scale, 'shear': shear})[0:3,0:3]
    r0 = numpy.linalg.solve(numpy.linalg.inv(r), numpy.linalg.inv(r0))
    rotate[1] = numpy.arcsin(_frone(r0[0,2]))
    if numpy.abs((numpy.abs(rotate[1]) - (numpy.pi / 2.0))) < 1.0e-6:
        rotate[0] = 0.0
        rotate[2] = numpy.arctan2(-_frone(r0[1,0]), _frone(-r0[2,0] / r0[0,2]))
    else:
        rc = numpy.cos(rotate[1])
        rotate[0] = numpy.arctan2(_frone(r0[1,2] / rc), _frone(r0[2,2] / rc))
        rotate[2] = numpy.arctan2(_frone(r0[0,1] / rc), _frone(r0[0,0] / rc))
    if was2d:
        trans = trans[1:]
        rotate = rotate[0:1]
        scale = scale[1:]
        shear = shear[2:3]
    return (trans, rotate, scale, shear)
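# Usage sketch (assumes the companion helpers trans_matrix and _frone from the
# same module, which are not defined in this snippet; _frone is assumed to clamp
# its argument to [-1, 1] and trans_matrix to rebuild a matrix from the parts):
#
#   trans, rotate, scale, shear = trans_matrix_inv(numpy.eye(4))
#   # for the 4x4 identity this should give trans == [0, 0, 0],
#   # rotate == [0, 0, 0], scale == [1, 1, 1], shear == [0, 0, 0]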
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xform_from_transformation_matrix(transformation_matrix):\n transform = Transform(1.0)\n for i in range(0, 4):\n for j in range(0, 4):\n transform[i, j] = transformation_matrix[i][j]\n return transform", "def _derive_transformation_matrices(self):\n\n if hasattr(self, '_primaries') and hasattr(self, '_whitepoint'):\n if self._primaries is not None and self._whitepoint is not None:\n npm = normalised_primary_matrix(self._primaries,\n self._whitepoint)\n\n self._derived_RGB_to_XYZ_matrix = npm\n self._derived_XYZ_to_RGB_matrix = np.linalg.inv(npm)", "def extractMatrix(self,groups):\n a = float(groups[0])\n b = float(groups[1])\n c = float(groups[2])\n d = float(groups[3])\n e = float(groups[4])\n f = float(groups[5])\n self.matrix=[[a,c,e], [b,d,f]]\n self.translateX = e\n self.translateY = f\n self.scaleX = math.sqrt(a**2+c**2)\n self.scaleY = math.sqrt(b**2+d**2)\n self.rotate = math.atan2(b,d)", "def decompose(self):\r\n dummy = self.ortho()\r\n dummy.setRow(3,_vec4(0.0, 0.0, 0.0, 1.0))\r\n\r\n x = dummy.getColumn(0)\r\n y = dummy.getColumn(1)\r\n z = dummy.getColumn(2)\r\n xl = x.length()\r\n yl = y.length()\r\n zl = z.length()\r\n scale = _vec3(xl,yl,zl)\r\n \r\n x/=xl\r\n y/=yl\r\n z/=zl\r\n dummy.setColumn(0,x)\r\n dummy.setColumn(1,y)\r\n dummy.setColumn(2,z)\r\n if dummy.determinant()<0.0:\r\n dummy.setColumn(0,-x)\r\n scale.x=-scale.x\r\n\r\n return (_vec3(self.mlist[3], self.mlist[7], self.mlist[11]),\r\n dummy,\r\n scale)", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def decompose(self,graph):\n x=[graph[i][0] for i in range(len(graph))]\n y=[graph[i][1] for i in range(len(graph))]\n return self.transform(x)+self.transform(y)", "def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()", "def transformation(T,M):\n\n n = len(M) # Nb de lignes\n p = len(M[0]) # Nb de colonnes\n\n x1, y1 = vecteur_image(T,0,n)\n x2, y2 = vecteur_image(T,p,n)\n x3, y3 = vecteur_image(T,p,0)\n\n xmin = round(min(0,x1,x2,x3))\n xmax = round(max(0,x1,x2,x3))\n ymin = round(min(0,y1,y2,y3))\n ymax = round(max(0,y1,y2,y3))\n\n pp = xmax-xmin\n nn = ymax-ymin\n\n Tinv = inverse_matrice(T)\n\n N = [[0 for jj in range(pp)] for ii in range(nn)]\n for ii in range(nn):\n for jj in range(pp):\n j, i = vecteur_image(Tinv,jj+xmin,ii+ymin)\n j, i = floor(j), floor(i)\n if (0 <= i < n) and (0 <= j < p):\n N[ii][jj] = M[i][j] \n else:\n N[ii][jj] = 0\n\n return N", "def transpose():", "def _convert_matrix(m):\n\n return [m[0][0], m[0][1], m[0][2], m[0][3],\n m[2][0], m[2][1], m[2][2], m[2][3],\n -m[1][0], -m[1][1], -m[1][2], -m[1][3],\n m[3][0], m[3][1], m[3][2], m[3][3]]", "def __read_transformation_matrix__(self, root_xml):\n # Try to find the node first\n node = root_xml.find(\"LPStoIJKTransformationMatrix\")\n if node is None:\n return None\n m = []\n temp = []\n for coord in node.findall(\"value\"):\n temp.append(float(coord.text))\n\n # Convert to a 4x4 list\n for i in range (4):\n m.append([temp[i*4], temp[i*4+1], temp[i*4+2], temp[i*4+3]])\n return m", "def translation_from_matrix(matrix):\r\n return numpy.array(matrix, copy=False)[:3, 3].copy()", "def get_matrix_list(transform):\n c_y = np.cos(np.radians(transform[5]))\n s_y = np.sin(np.radians(transform[5]))\n 
c_r = np.cos(np.radians(transform[3]))\n s_r = np.sin(np.radians(transform[3]))\n c_p = np.cos(np.radians(transform[4]))\n s_p = np.sin(np.radians(transform[4]))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = transform[0]\n matrix[1, 3] = transform[1]\n matrix[2, 3] = transform[2]\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n origin = np.array(transform[:3])\n return matrix, origin", "def transpose(m):\n\n pass", "def decompose_essential_matrix(E, x1, x2):\n\n # Fix left camera-matrix\n Rl = np.eye(3)\n tl = np.array([[0, 0, 0]]).T\n Pl = np.concatenate((Rl, tl), axis=1)\n\n # TODO: Compute possible rotations and translations\n \n # s must be [1, 1, 0]\n u, s, vh = np.linalg.svd(E)\n E = u @ np.diag([1, 1, 0]) @ vh\n u, s, vh = np.linalg.svd(E)\n\n w = np.array([[ 0, 1, 0], \n [-1, 0, 0], \n [ 0, 0, 1]]) \n \n z = np.array([[ 0, -1, 0], \n [ 1, 0, 0],\n [ 0, 0, 0]])\n \n R1 = u @ w.T @ vh\n s1 = -u @ z @ u.T\n R2 = u @ w @ vh\n s2 = u @ z @ u.T\n\n t1 = np.array([[s1[2, 1]], \n [s1[0, 2]],\n [s1[1, 0]]])\n \n t2 = np.array([[s2[2, 1]], \n [s2[0, 2]], \n [s2[1, 0]]]) \n\n # Four possibilities\n Pr = [np.concatenate((R1, t1), axis=1),\n np.concatenate((R1, t2), axis=1),\n np.concatenate((R2, t1), axis=1),\n np.concatenate((R2, t2), axis=1)]\n\n # Compute reconstructions for all possible right camera-matrices\n X3Ds = [infer_3d(x1[:, 0:1], x2[:, 0:1], Pl, x) for x in Pr]\n\n # Compute projections on image-planes and find when both cameras see point\n test = [np.prod(np.hstack((Pl @ np.vstack((X3Ds[i], [[1]])), Pr[i] @ np.vstack((X3Ds[i], [[1]])))) > 0, 1) for i in\n range(4)]\n test = np.array(test)\n idx = np.where(np.hstack((test[0, 2], test[1, 2], test[2, 2], test[3, 2])) > 0.)[0][0]\n\n # Choose correct matrix\n Pr = Pr[idx]\n\n return Pl, Pr", "def transform(self, transformer):\n\t\tnew_matrix = Matrix(self.dims)\n\t\tnew_matrix.data = [transformer(copy.deepcopy(c)) for c in self.data]\n\t\treturn new_matrix", "def _init_transformation_matrix(self):\n # Set up basic transformation matrix\n c_transform = np.zeros((self.n_beads, self.n_beads))\n\n # Get auxiliary array with bead indices\n n = np.arange(1, self.n_beads + 1)\n\n # for k = 0\n c_transform[0, :] = 1.0\n\n for k in range(1, self.n_beads // 2 + 1):\n c_transform[k, :] = np.sqrt(2) * np.cos(2 * np.pi * k * n / self.n_beads)\n\n for k in range(self.n_beads // 2 + 1, self.n_beads):\n c_transform[k, :] = np.sqrt(2) * np.sin(2 * np.pi * k * n / self.n_beads)\n\n if self.n_beads % 2 == 0:\n c_transform[self.n_beads // 2, :] = (-1) ** n\n\n # Since matrix is initialized as C(k,n) does not need to be transposed\n c_transform /= np.sqrt(self.n_beads)\n c_transform = torch.from_numpy(c_transform)\n\n return c_transform", "def compute_trans_matrix( self, n_components ):\n matrix = np.zeros((n_components,n_components))\n matrix[-1,-1] = 1.\n\n for i in range(0,matrix.shape[0]-1):\n matrix[i,i:i+2] = [0.5,0.5]\n\n return matrix", "def matPart(mat, rs, re, cs, ce):\n return [[matGet(mat,x,y) for y in range(cs,ce)] \\\n for x in range(rs,re)]", "def _decompose_rotation(self, cmd):\n \n axis = None\n angle = 0\n gate_name = str(cmd.gate)\n\n if \"Rz\" in gate_name:\n axis = 'z'\n elif \"Rx\" in gate_name:\n axis = 'x'\n elif \"Ry\" in gate_name:\n axis = 'y' \n\n angle = 
gate_name[gate_name.find(\"(\")+1:gate_name.find(\")\")]\n\n decomposition = subprocess.check_output(\"./gridsynth \" + angle, shell=True)[:-1]\n new_sequence = self._process_decomposition(str(decomposition),cmd.qubits[0])\n return new_sequence", "def _transform(self, matrix):\n for x in list(self.keys()):\n ar = self[x]\n if len(ar.shape) == 2 and ar.shape[1] == 3:\n self[x] = np.dot(matrix, ar.transpose()).transpose()", "def process(self, mat):", "def affine_transform(geom, matrix):\n if geom.is_empty:\n return geom\n if len(matrix) == 6:\n ndim = 2\n a, b, d, e, xoff, yoff = matrix\n if geom.has_z:\n ndim = 3\n i = 1.0\n c = f = g = h = zoff = 0.0\n matrix = a, b, c, d, e, f, g, h, i, xoff, yoff, zoff\n elif len(matrix) == 12:\n ndim = 3\n a, b, c, d, e, f, g, h, i, xoff, yoff, zoff = matrix\n if not geom.has_z:\n ndim = 2\n matrix = a, b, d, e, xoff, yoff\n else:\n raise ValueError(\"'matrix' expects either 6 or 12 coefficients\")\n\n def affine_pts(pts):\n \"\"\"Internal function to yield affine transform of coordinate tuples\"\"\"\n if ndim == 2:\n for x, y in pts:\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n yield (xp, yp)\n elif ndim == 3:\n for x, y, z in pts:\n xp = a * x + b * y + c * z + xoff\n yp = d * x + e * y + f * z + yoff\n zp = g * x + h * y + i * z + zoff\n yield (xp, yp, zp)\n\n # Process coordinates from each supported geometry type\n if geom.type in ('Point', 'LineString', 'LinearRing'):\n return type(geom)(list(affine_pts(geom.coords)))\n elif geom.type == 'Polygon':\n ring = geom.exterior\n shell = type(ring)(list(affine_pts(ring.coords)))\n holes = list(geom.interiors)\n for pos, ring in enumerate(holes):\n holes[pos] = type(ring)(list(affine_pts(ring.coords)))\n return type(geom)(shell, holes)\n elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':\n # Recursive call\n # TODO: fix GeometryCollection constructor\n return type(geom)([affine_transform(part, matrix)\n for part in geom.geoms])\n else:\n raise ValueError('Type %r not recognized' % geom.type)", "def transform(self, image_matrix):\n\n # Centering the data\n mean = np.mean(image_matrix, axis=0)\n image_matrix = image_matrix - mean\n\n # Dimension reduction is done by multiplying the original matrix with the components\n transformed_matrix = np.dot(image_matrix, self.components.T)\n return transformed_matrix", "def extractTranslate(self,groups):\n self.translateX = float(groups[0])\n self.translateY = float(groups[0])\n if len(groups) == 2 and groups[1]:\n self.translateY = float(groups[1])\n self.matrix = [[1.0, 0.0, self.translateX], \\\n [0.0, 1.0, self.translateY]]", "def to_matrix_vector(transform):\n ndimin = transform.shape[0] - 1\n ndimout = transform.shape[1] - 1\n matrix = transform[0:ndimin, 0:ndimout]\n vector = transform[0:ndimin, ndimout]\n return matrix, vector", "def decompose(self, idx):\n idx = self.robotize(idx)\n return (idx // self.col_count, idx % self.col_count)", "def transformation_matrix(self, s1, s2, s3, t1, t2, t3):\n\n s1 = np.array(s1)\n s2 = np.array(s2)\n s3 = np.array(s3)\n t1 = np.array(t1)\n t2 = np.array(t2)\n t3 = np.array(t3)\n\n Q = np.array(\n [\n [t2[0] - t1[0], t2[1] - t1[1], t2[2] - t1[2]],\n [t3[0] - t1[0], t3[1] - t1[1], t3[2] - t1[2]],\n ]\n )\n\n P = np.array([[s2[0] - s1[0], s2[1] - s1[1]], [s3[0] - s1[0], s3[1] - s1[1]]])\n\n try:\n # Invert the P matrix\n Pinv = inv(P)\n\n # Build the dot product\n T = np.dot(Pinv, Q)\n\n # Offset\n V0 = np.subtract(t2, np.transpose(s2[0:2]).dot(T))\n except Exception as e:\n 
self.log.error(\"An error occured during the transformation.\", exc_info=True)\n return -1, -1\n\n return T, V0", "def _decompose(self, reg):\n raise NotImplementedError('No decomposition available: {}'.format(self))", "def transform():", "def partial_transpose(matrix):\n size = len(matrix)\n res_matrix = np.zeros((size,) * 4, dtype=complex)\n for p1 in range(size):\n for p2 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n res_matrix[p1, p2, p1_, p2_] = matrix[p1, p2_, p1_, p2]\n return res_matrix", "def get_transformation_matrix(self, fromFrame, toFrame):\n fromIndex = self.frameNames.index(fromFrame)\n toIndex = self.frameNames.index(toFrame)\n #return get_transformation_matrix(self.frameStack, fromIndex, toIndex)\n return self._get_transformation_matrix_with_indices(fromIndex, toIndex)", "def get_ee_transform_matrices(self):\n translation_mat = np.zeros((4, 4))\n rotation_mat = np.zeros((4, 4))\n\n with open(directory + '/EE_to_Palm_Translation_Matrix.csv', newline='') as f:\n reader = csv.reader(f)\n for j, row in enumerate(reader):\n for i, col in enumerate(row):\n translation_mat[j][i] = float(col)\n\n with open(directory + '/EE_to_Palm_Rotation_Matrix.csv', newline='') as f:\n reader = csv.reader(f)\n for j, row in enumerate(reader):\n for i, col in enumerate(row):\n rotation_mat[j][i] = float(col)\n\n return translation_mat, rotation_mat", "def separa(matrix):\r\n linha, coluna = matrix.shape\r\n linha2, coluna2 = linha//2, coluna//2\r\n return matrix[:linha2, :coluna2], matrix[:linha2, coluna2:], matrix[linha2:, :coluna2], matrix[linha2:, coluna2:]", "def decompose(xform, angles=True, shears=False):\n\n # The inline comments in the code below are taken verbatim from\n # the referenced article, [except for notes in square brackets].\n\n # The next step is to extract the translations. This is trivial;\n # we find t_x = M_{4,1}, t_y = M_{4,2}, and t_z = M_{4,3}. At this\n # point we are left with a 3*3 matrix M' = M_{1..3,1..3}.\n xform = np.array(xform).T\n\n if xform.shape == (4, 4):\n translations = xform[ 3, :3]\n xform = xform[:3, :3]\n else:\n translations = np.array([0, 0, 0])\n\n M1 = xform[0]\n M2 = xform[1]\n M3 = xform[2]\n\n # The process of finding the scaling factors and shear parameters\n # is interleaved. First, find s_x = |M'_1|.\n sx = np.sqrt(np.dot(M1, M1))\n M1 = M1 / sx\n\n # Then, compute an initial value for the xy shear factor,\n # s_xy = M'_1 * M'_2. (this is too large by the y scaling factor).\n sxy = np.dot(M1, M2)\n\n # The second row of the matrix is made orthogonal to the first by\n # setting M'_2 = M'_2 - s_xy * M'_1.\n M2 = M2 - sxy * M1\n\n # Then the y scaling factor, s_y, is the length of the modified\n # second row.\n sy = np.sqrt(np.dot(M2, M2))\n\n # The second row is normalized, and s_xy is divided by s_y to\n # get its final value.\n M2 = M2 / sy\n sxy = sxy / sx\n\n # The xz and yz shear factors are computed as in the preceding,\n sxz = np.dot(M1, M3)\n syz = np.dot(M2, M3)\n\n # the third row is made orthogonal to the first two rows,\n M3 = M3 - sxz * M1 - syz * M2\n\n # the z scaling factor is computed,\n sz = np.sqrt(np.dot(M3, M3))\n\n # the third row is normalized, and the xz and yz shear factors are\n # rescaled.\n M3 = M3 / sz\n sxz = sxz / sx\n syz = syz / sy\n\n # The resulting matrix now is a pure rotation matrix, except that it\n # might still include a scale factor of -1. If the determinant of the\n # matrix is -1, negate the matrix and all three scaling factors. 
Call\n # the resulting matrix R.\n #\n # [We do things different here - if the rotation matrix has negative\n # determinant, the flip is encoded in the x scaling factor.]\n R = np.array([M1, M2, M3])\n if linalg.det(R) < 0:\n R[0] = -R[0]\n sx = -sx\n\n # Finally, we need to decompose the rotation matrix into a sequence\n # of rotations about the x, y, and z axes. [This is done in the\n # rotMatToAxisAngles function]\n if angles: rotations = rotMatToAxisAngles(R.T)\n else: rotations = R.T\n\n retval = [np.array([sx, sy, sz]), translations, rotations]\n\n if shears:\n retval.append(np.array((sxy, sxz, syz)))\n\n return tuple(retval)", "def get_4x4_transform(scale_x, scale_y, trans_x, trans_y, trans_z):\r\n transform = [[scale_x, 0.0, 0.0, trans_x],\r\n [0.0, scale_y, 0.0, trans_y],\r\n [0.0, 0.0, 1.0, trans_z],\r\n [0.0, 0.0, 0.0, 1.0]]\r\n return transform", "def dim_reduction(data_set, components):\n transformed = []\n index = -1\n transformed = data_set @ components\n return transformed", "def transpose(self):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n return mat4(m11,m21,m31,m41,\r\n m12,m22,m32,m42,\r\n m13,m23,m33,m43,\r\n m14,m24,m34,m44)", "def transform(self, x: Array2D) -> Array2D:", "def secondSubIter(validateMatrix):\n assert np.ndim(validateMatrix) == 3\n firstTransition = _rot3D90(_rot3D90(validateMatrix, 'y', 2), 'x', 3)\n listedMatrix = list(np.reshape(firstTransition, 27))\n del(listedMatrix[13])\n val1 = _getTempsDelexpression(listedMatrix)\n # str1 = ''.join(str(e) for e in listedMatrix)\n return val1, listedMatrix", "def _compose_transforms(basis_transforms, source_basis, source_dag):\n example_gates = _get_example_gates(source_dag)\n mapped_instrs = {}\n\n for gate_name, gate_num_qubits in source_basis:\n # Need to grab a gate instance to find num_qubits and num_params.\n # Can be removed following https://github.com/Qiskit/qiskit-terra/pull/3947 .\n example_gate = example_gates[gate_name, gate_num_qubits]\n num_params = len(example_gate.params)\n\n placeholder_params = ParameterVector(gate_name, num_params)\n placeholder_gate = Gate(gate_name, gate_num_qubits, list(placeholder_params))\n placeholder_gate.params = list(placeholder_params)\n\n dag = DAGCircuit()\n qr = QuantumRegister(gate_num_qubits)\n dag.add_qreg(qr)\n dag.apply_operation_back(placeholder_gate, qr[:], [])\n mapped_instrs[gate_name, gate_num_qubits] = placeholder_params, dag\n\n for gate_name, gate_num_qubits, equiv_params, equiv in basis_transforms:\n logger.debug(\n \"Composing transform step: %s/%s %s =>\\n%s\",\n gate_name,\n gate_num_qubits,\n equiv_params,\n equiv,\n )\n\n for mapped_instr_name, (dag_params, dag) in mapped_instrs.items():\n doomed_nodes = [\n node\n for node in dag.op_nodes()\n if (node.op.name, node.op.num_qubits) == (gate_name, gate_num_qubits)\n ]\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updating transform for mapped instr %s %s from \\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n for node in doomed_nodes:\n\n replacement = equiv.assign_parameters(\n dict(zip_longest(equiv_params, node.op.params))\n )\n\n replacement_dag = circuit_to_dag(replacement)\n\n dag.substitute_node_with_dag(node, replacement_dag)\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updated transform for mapped instr %s %s to\\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n return mapped_instrs", 
"def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "def get_transformation_matrix(self):\n\n s = self.sin()\n c = self.cos()\n return np.array(\n [\n c ** 2,\n c * s,\n -(c ** 2),\n -c * s,\n c * s,\n s ** 2,\n -c * s,\n -(s ** 2),\n -(c ** 2),\n -c * s,\n c ** 2,\n c * s,\n -c * s,\n -(s ** 2),\n c * s,\n s ** 2,\n ],\n dtype=np.float64,\n ).reshape(4, 4)", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n\n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n #S_inv = np.linalg.inv(S)\n #old_coords = np.array([[2, 2, 1], [6, 6, 1]]).T\n #new_coords = np.matmul(S, old_coords)\n #recovered_coords = np.matmul(S_inv, new_coords)\n #print('new coords: ', new_coords)\n #print('recovered coords: ', recovered_coords)\n return S", "def apply_dof_transformation_to_transpose_prism(\n entity_transformations, entity_dofs, data, cell_info\n):\n apply_dof_transformation_to_transpose(3, 9, 5, entity_transformations, entity_dofs,\n data, cell_info, List([\"triangle\"] + [\"quadrilateral\"] * 4 + [\"triangle\"]))", "def forward_transform(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n #Fourier Transform matrix:\n ft = np.zeros([x,y], complex)\n count =0\n for u in range(0, x):\n for v in range(0, y):\n sum_ft = 0\n for i in range(0, x):\n for j in range(0, y):\n\n sum_ft = sum_ft + matrix[i, j] * (np.cos(((2*np.pi)/N)*(u*i + v*j)) - 1j*np.sin(((2*np.pi)/N)*(u*i + v*j)))\n\n ft[u, v] = sum_ft\n\n #print(u, v)\n if u != 0 and v != 0 and (u <= int(x/2) and v < int(y/2)):\n\n ft[(x - u), (y - v)] = np.real(ft[u, v]) - np.imag(ft[u, v] * 1j)\n count = count + 2\n \n if count == x*y:\n return ft\n return ft", "def prepare_texture_matrix(self):\n\t\ttexture_matrix = self.normalize_data()\n\t\tlabels = MatrixCreation().independent_variable_labels()\n\t\t\n\t\tcolumns_to_be_deleted = []\n\n\t\tfor key, label in labels.items():\n\t\t\tif label not in self.principal_components:\n\t\t\t\tcolumns_to_be_deleted.append(key)\n\n\t\ttexture_matrix = numpy.delete(texture_matrix,columns_to_be_deleted,axis=1)\n\n\t\treturn texture_matrix", "def _parse_transformations(lines):\n # Each transformation requires 3 lines for the (x,y,z) components\n if len(lines) % 3 != 0:\n raise InvalidFileError(\"Invalid number of transformation vectors\")\n n_transformations = len(lines) // 3\n\n rotations = np.zeros((n_transformations, 3, 3), dtype=float)\n translations = np.zeros((n_transformations, 3), dtype=float)\n\n transformation_i = 0\n component_i = 0\n for line in lines:\n # The first two elements (component and\n # transformation index) are not used\n transformations = [float(e) for e in line.split()[2:]]\n if len(transformations) != 4:\n raise InvalidFileError(\n \"Invalid number of transformation vector elements\"\n )\n rotations[transformation_i, component_i, :] = transformations[:3]\n translations[transformation_i, component_i] = transformations[3]\n\n component_i += 1\n if component_i == 3:\n # All (x,y,z) components were parsed\n # -> head to the next transformation \n transformation_i += 1\n component_i = 0\n \n return rotations, translations", "def transform(self,X):\n # conver X to a list\n X = X.tolist()\n result = []\n\n # iterate over the length of X\n for b in range(len(X)):\n\n # change dataset accoring to bias\n if self.include_bias:\n X[b].insert(0, 1)\n \n # initialize an array to store dynamically all array of indices\n init_arr = []\n for j in range(len(X[b])):\n 
init_arr.append([j])\n\n # array of indices\n arr = [j for j in range(len(X[b]))]\n separate_arr = init_arr.copy()\n\n # iterate for the degree given\n for k in range(0,self.degree-1):\n # for len of the array containing indices\n for i in range(len(arr)):\n temp = i\n # this loop will have different length since length increases\n for j in range((k)*len(arr),len(separate_arr)):\n element = init_arr[j].copy()\n element.append(temp)\n init_arr.append(element) \n separate_arr = init_arr.copy()\n # sort the array obtained to remove repeated elements\n array = []\n for m in range(len(init_arr)):\n init_arr[m].sort()\n if(init_arr[m] not in array):\n array.append(init_arr[m])\n\n # calculate the final values by multiplying the numbers or columns at the place of indices\n final = []\n for i in array:\n lst = []\n # only if lenth satisfies the given degree\n if len(i)==self.degree:\n for j in i: \n lst.append(X[b][j]) \n final.append(np.product(lst))\n result.append(final)\n return result", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def complexDecompose(self,graph):\n z=[complex(*graph[i]) for i in range(len(graph))]\n return self.complexTransform(z)", "def transformation_2d(vertices, kernels=KERNELS):\n\t# calculate the transpose matrix of vertices\n\ttranspose = vertices.transpose()\n\t# insert a row of ones in the transpose matrix's end, then insert the result in 'matrices' list\n\tkernels.append(np.append(transpose, [np.ones(len(transpose[0]))], axis=0))\n\t# multiply matrices into 'kernels' list,\n\t# remove the last row (of ones) and calculate the transpose matrix of the result\n\tfinal_transformation_result = np.delete(np.linalg.multi_dot(kernels), 2, 0).transpose()\n\tKERNELS.clear()\n\treturn final_transformation_result", "def output_reshape(ct):\n return np.moveaxis(ct, 1, -1)", "def transform_mat(matrix):\n delta = 1e-5\n matrix = matrix + delta\n return matrix", "def get_perspective_transform(src, dst):\n if not isinstance(src, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(src)))\n\n if not isinstance(dst, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(dst)))\n\n if not src.shape[-2:] == (4, 2):\n raise ValueError(\"Inputs must be a Bx4x2 tensor. Got {}\".format(src.shape))\n\n if not src.shape == dst.shape:\n raise ValueError(\"Inputs must have the same shape. Got {}\".format(dst.shape))\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(\n \"Inputs must have same batch size dimension. Expect {} but got {}\".format(src.shape, dst.shape)\n )\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n for i in [0, 1, 2, 3]:\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'y'))\n\n # A is Bx8x8\n A = torch.stack(p, dim=1)\n\n # b is a Bx8x1\n b = torch.stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 3:4, 0],\n dst[:, 3:4, 1],\n ],\n dim=1,\n )\n\n # solve the system Ax = b\n X, LU = _torch_solve_cast(b, A)\n\n # create variable to return\n batch_size = src.shape[0]\n M = torch.ones(batch_size, 9, device=src.device, dtype=src.dtype)\n M[..., :8] = torch.squeeze(X, dim=-1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def _from_matrix(cls, matrix):\n try:\n shape = matrix.shape\n except AttributeError:\n raise TypeError(\"Invalid matrix type: Input must be a 3x3 or 4x4 numpy array or matrix\")\n\n if shape == (3, 3):\n R = matrix\n elif shape == (4,4):\n R = matrix[:-1][:,:-1] # Upper left 3x3 sub-matrix\n else:\n raise ValueError(\"Invalid matrix shape: Input must be a 3x3 or 4x4 numpy array or matrix\")\n\n # Check matrix properties\n if not np.allclose(np.dot(R, R.conj().transpose()), np.eye(3), atol=1e-6):\n raise ValueError(\"Matrix must be orthogonal, i.e. its transpose should be its inverse\")\n if not np.isclose(np.linalg.det(R), 1.0):\n print(np.linalg.det(R))\n raise ValueError(\"Matrix must be special orthogonal i.e. its determinant must be +1.0\")\n\n def decomposition_method(matrix):\n \"\"\" Method supposedly able to deal with non-orthogonal matrices - NON-FUNCTIONAL!\n Based on this method: http://arc.aiaa.org/doi/abs/10.2514/2.4654\n \"\"\"\n x, y, z = 0, 1, 2 # indices\n K = np.array([\n [R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],\n [R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],\n [R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],\n [R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]\n ])\n K = K / 3.0\n\n e_vals, e_vecs = np.linalg.eig(K)\n print('Eigenvalues:', e_vals)\n print('Eigenvectors:', e_vecs)\n max_index = np.argmax(e_vals)\n principal_component = e_vecs[max_index]\n return principal_component\n\n def trace_method(matrix):\n \"\"\"\n This code uses a modification of the algorithm described in:\n https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf\n which is itself based on the method described here:\n http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/\n\n Altered to work with the column vector convention instead of row vectors\n \"\"\"\n m = matrix.conj().transpose() # This method assumes row-vector and postmultiplication of that vector\n if m[2, 2] < 0:\n if m[0, 0] > m[1, 1]:\n t = 1 + m[0, 0] - m[1, 1] - m[2, 2]\n q = [m[1, 2]-m[2, 1], t, m[0, 1]+m[1, 0], m[2, 0]+m[0, 2]]\n else:\n t = 1 - m[0, 0] + m[1, 1] - m[2, 2]\n q = [m[2, 0]-m[0, 2], m[0, 1]+m[1, 0], t, m[1, 2]+m[2, 1]]\n else:\n if m[0, 0] < -m[1, 1]:\n t = 1 - m[0, 0] - m[1, 1] + m[2, 2]\n q = [m[0, 1]-m[1, 0], m[2, 0]+m[0, 2], m[1, 2]+m[2, 1], t]\n else:\n t = 1 + m[0, 0] + m[1, 1] + m[2, 2]\n q = [t, m[1, 2]-m[2, 1], m[2, 0]-m[0, 2], m[0, 1]-m[1, 0]]\n\n q = np.array(q)\n q *= 0.5 / sqrt(t);\n return q\n\n return cls(array=trace_method(R))", "def transform_matrix_from_line_segments(ls11,ls12,LS11,LS12):\n norm = lambda vec: (vec[0]**2 + vec[1]**2 + 
vec[2]**2)**0.5\n vec1 = ls12 - ls11\n vec2 = LS12 - LS11\n trans_to_origin = translateEuler(-ls11)\n temp = rotate_matrix_from_vectors(vec1, vec2)\n rot_matrix = np.zeros([4,4])\n rot_matrix[:-1, :-1] = temp\n rot_matrix[3,3] = 1\n scale_matrix = np.eye(4)*norm(vec2)/norm(vec1) \n scale_matrix[3,3] = 1\n trans_to_point = translateEuler(LS11)\n # print(trans_to_point)\n # print(rot_matrix)\n # print(trans_to_origin)\n \n return trans_to_point@scale_matrix@rot_matrix@trans_to_origin", "def transform_points(transf_matrix, points):\n if(type(points)==list):\n temp_pts = [np.array([x[0],x[1],x[2],1]) for x in points]\n newpts = []\n for pt in temp_pts:\n newpts.append((transf_matrix@pt)[:3])\n else:\n temp_pts = np.array([points[0],points[1],points[2],1])\n newpts=(transf_matrix@temp_pts)[:3]\n return newpts", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return w, v", "def interpolate_matrix(matrix):", "def transformation_matrix(self, di=(0, 0, 0), dj=(0, 0, 0)):\n xi, xj = self.get_nodes()\n di, dj = np.asarray(di), np.asarray(dj)\n dx, dy, dz = (xj - xi) + (dj - di)\n return transformation_matrix(dx, dy, dz, self.roll)", "def fast_TRS_2d(input, transform_matrix, input_is_point=False):\n if input_is_point:\n return np.delete(np.dot(transform_matrix, np.insert(input, 2, 1)), 2)\n else:\n return np.delete(np.einsum('jk,ik->ij', transform_matrix, np.insert(input, 2, 1, axis=1)), 2, 1)", "def getTransposeMatrix(self) -> CMatrix4:\n ...", "def wc_matrix(matrix):\n return [{\"A\": position[\"T\"], \"T\": position[\"A\"], \"C\": position[\"G\"], \"G\": position[\"C\"]} for position in matrix[::-1]]", "def apply_transformation_np(source, transformation):\n source_homog = np.ones((source.shape[0], 4))\n source_homog[:, :-1] = source\n # source_homog = np.hstack(\n # (source, np.ones(source.shape[0], 1))\n # )\n\n source_transformed = np.matmul(transformation, source_homog.T).T[:, :-1]\n return source_transformed", "def turn_matrix(self, matrix):\n if matrix[0][0] == 1:\n tab = [[(matrix[-j - 1][i] + 1) % 2 for j in range(self.size)] for i in range(self.size)]\n elif matrix[0][1] == 1:\n tab = [[(matrix[i][j] + 1) % 2 for j in range(self.size)] for i in range(self.size)]\n elif matrix[1][0] == 1:\n tab = [[(matrix[i][j] + 1) % 2 for j in range(self.size)] for i in range(self.size)]\n else:\n tab = [[(matrix[j][-1 - i] + 1) % 2 for j in range(self.size)] for i in range(self.size)]\n return tab", "def transpose(self) -> None:\n ...", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def reconstruct_input(self, ix):", "def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n return M, Minv", "def split_matrix(X, tr = 0.5, vl = 0.25):\n X, Xv, Xt = X[:int(len(X)*tr)], X[int(len(X)*tr):int(len(X)*(tr + vl))], X[int(len(X)*(tr + vl)):]\n return X, Xv, Xt", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == 
points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def fourthSubIter(validateMatrix):\n assert np.ndim(validateMatrix) == 3\n thirdTransition = _rot3D90(validateMatrix, 'x', 3)\n listedMatrix = list(np.reshape(thirdTransition, 27))\n del(listedMatrix[13])\n val1 = _getTempsDelexpression(listedMatrix)\n # str1 = ''.join(str(e) for e in listedMatrix)\n return val1, listedMatrix", "def thirdSubIter(validateMatrix):\n assert np.ndim(validateMatrix) == 3\n secondTransition = _rot3D90(_rot3D90(validateMatrix, 'x', 1), 'z', 1)\n listedMatrix = list(np.reshape(secondTransition, 27))\n del(listedMatrix[13])\n val1 = _getTempsDelexpression(listedMatrix)\n # str1 = ''.join(str(e) for e in listedMatrix)\n return val1, listedMatrix", "def get_transform(self, from_frame, to_frame):\n if not self._pipeline:\n return None\n try:\n from_ind = self._get_frame_index(from_frame)\n except ValueError:\n raise CoordinateFrameError(\"Frame {0} is not in the available \"\n \"frames\".format(from_frame))\n try:\n to_ind = self._get_frame_index(to_frame)\n except ValueError:\n raise CoordinateFrameError(\"Frame {0} is not in the available frames\".format(to_frame))\n if to_ind < from_ind:\n #transforms = np.array(self._pipeline[to_ind: from_ind], dtype=\"object\")[:, 1].tolist()\n transforms = [step.transform for step in self._pipeline[to_ind: from_ind]]\n transforms = [tr.inverse for tr in transforms[::-1]]\n elif to_ind == from_ind:\n return None\n else:\n #transforms = np.array(self._pipeline[from_ind: to_ind], dtype=\"object\")[:, 1].copy()\n transforms = [step.transform for step in self._pipeline[from_ind: to_ind]]\n return functools.reduce(lambda x, y: x | y, transforms)", "def cleanup_transition_matrix(matrix,polarization):\n\n index = []\n for i in range(len(matrix['label'])):\n if (polarization[0] == 0) & ('right' in matrix['label'][i]):\n index.append(i)\n elif (polarization[1] == 0) & ('parallel' in matrix['label'][i]):\n index.append(i)\n elif (polarization[2] == 0) & ('left' in matrix['label'][i]):\n index.append(i)\n\n for i in reversed(index):\n del matrix['label'][i]\n del matrix['bra_energy'][i]\n del matrix['ket_energy'][i]\n del matrix['matrix'][i]\n\n return matrix", "def transformation_matrix(dx, dy, dz, roll=0):\n r = rotation_matrix(dx, dy, dz, roll)\n t = np.zeros((12, 12), dtype='float')\n t[:3,:3] = 
t[3:6,3:6] = t[6:9,6:9] = t[9:12,9:12] = r\n return t", "def attrTransform(self, matrix, transform):\n for ttype, targs in self.reTransformFind.findall(transform):\n targs = list(map(lambda x: float(x), self.reNumberFind.findall(targs)))\n if ttype == 'matrix':\n newmatrix = [ targs[0], targs[1],\n targs[2], targs[3],\n targs[4], targs[5] ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'translate':\n tx = targs[0]\n ty = targs[1] if len(targs) > 1 else 0\n newmatrix = [ 1, 0, 0, 1, tx, ty ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'scale':\n sx = targs[0]\n sy = targs[1] if len(targs) > 1 else sx\n newmatrix = [ sx, 0, 0, sy, 0, 0 ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'rotate':\n if len(targs) == 1:\n alpha = targs[0]\n newmatrix = [ math.cos(alpha), math.sin(alpha),\n -math.sin(alpha), math.cos(alpha),\n 0, 0]\n self.matrixMul(matrix, newmatrix)\n else:\n alpha = targs[0]\n newmatrix = [ 1, 0, 0, 1, targs[1], targs[2] ]\n self.matrixMul(matrix, newmatrix)\n newmatrix = [ math.cos(alpha), math.sin(alpha),\n -math.sin(alpha), math.cos(alpha),\n 0, 0]\n self.matrixMul(matrix, newmatrix)\n newmatrix = [ 1, 0, 0, 1, -targs[1], -targs[2] ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'skewX' or ttype == 'skewY':\n self.alert(\"skewX and skewY transformations are not supported\", elem)\n else:\n print('unknown transform type: ', ttype)\n return matrix", "def decompress(self, tensors):", "def transform_points(points, transf_matrix):\n if points.shape[0] not in [3, 4]:\n raise Exception(\n \"Points input should be (3,N) or (4,N) shape, received {}\".format(\n points.shape\n )\n )\n return transf_matrix.dot(np.vstack((points[:3, :], np.ones(points.shape[1]))))[\n :3, :\n ]", "def GetTransform(self, *args) -> \"itkTransformD22 *\":\n return _itkTransformMeshFilterPython.itkTransformMeshFilterMF2MF2TD22_GetTransform(self, *args)", "def transform(self, previousimage):", "def setrans(Bi, t):\n\n x,v=mat2set(Bi)\n Bo = set2mat((x+t,v))\n Bo = Bo.astype(Bi.dtype)\n return Bo", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def from_matrix(matrix: types.Matrix) -> \"MatrixLieGroup\":", "def decompress(matrix_y, matrix_w):\n rows, cols = len(matrix_y[0]), len(matrix_y)\n result_array = np.zeros((rows, cols))\n for i in range(rows):\n for k in range(cols):\n result_array[i] = result_array[i] + matrix_w[k] * matrix_y[k][i]\n return DataFrame(result_array)", "def separate_components(imgs_ft, phases, mod_depths=None, amps=None):\n nangles, nphases, ny, nx = imgs_ft.shape\n\n # default parameters\n if mod_depths is None:\n mod_depths = np.ones((nangles, nphases))\n\n if amps is None:\n amps = np.ones((nangles, nphases))\n\n # check parameters\n if nphases != 3:\n raise NotImplementedError(\"only implemented for nphases=3, but nphases=%d\" % nphases)\n\n components_ft = np.empty((nangles, nphases, ny, nx), dtype=np.complex) * np.nan\n\n # try to do inversion\n for ii in range(nangles):\n kmat = get_kmat(phases[ii], mod_depths[ii], amps[ii])\n\n try:\n 
kmat_inv = np.linalg.inv(kmat)\n components_ft[ii] = mult_img_matrix(imgs_ft[ii], kmat_inv)\n\n except np.linalg.LinAlgError:\n warnings.warn(\"warning, inversion matrix for angle index=%d is singular. This data will be ignored in SIM reconstruction\" % ii)\n\n return components_ft", "def matT(mat):\n shape=matShape(mat)\n return [[matGet(mat,y,x) for y in range(shape[0])] \\\n for x in range(shape[1])]", "def get_translation_matrix2d(translations: Tensor) -> Tensor:\n transform: Tensor = eye_like(3, translations)[:, :2, :]\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n return transform_h", "def to_matrix(self): \n warnings.warn(f'{self} is being reconstructed into a matrix, consider operating on the decomposed form.')\n\n full = self.to_tensor()\n if self.n_matrices == ():\n return full.reshape(self.shape)\n else:\n return full.reshape(self.n_matrices + self.shape)", "def compose_mat(rot):\n trans_mat = oMa.MTransformationMatrix()\n trans_mat.setRotation(rot)\n\n mat = trans_mat.asMatrix()\n\n return mat", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def decompose(self, observation_matrix, method=\"smoother\"):\n if method == \"filter\":\n means = self.filtered_state_means\n else:\n means = self.smoothed_state_means\n nsdf = self.observation_matrix.shape[0]\n ncdf = self.observation_matrix.shape[1] - nsdf\n sdf_means = []\n for t, _ in enumerate(means):\n sdf_means.append(np.dot(observation_matrix[:, :nsdf],\n means[t, :nsdf]))\n cdf_means = [[]] * ncdf\n for k in range(ncdf):\n idx = nsdf + k\n for t, _ in enumerate(means):\n cdf_means[k].append(np.dot(observation_matrix[:, idx],\n means[t, idx]))\n return (sdf_means, cdf_means)", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n\n origin = np.array([location.x, location.y, location.z])\n return matrix, origin", "def T(self):\n # TODO - your code here\n matrix_transpose = [];\n \n for j in range(self.w):\n matrix_transpose.append(self.get_column(j));\n \n return Matrix(matrix_transpose);", "def trans(array,dim):\n return array[filter(lambda x: x != dim,range(len(array)) ) ]", "def decomposition(self, *args, **kwds):\n if not self.is_endomorphism():\n raise ArithmeticError(\"Matrix morphism must be an endomorphism.\")\n D = self.domain()\n E = self.matrix().decomposition(*args,**kwds)\n if D.is_ambient():\n return Sequence([D.submodule(V, check=False) for V, _ in E],\n cr=True, check=False)\n else:\n B = D.basis_matrix()\n R = D.base_ring()\n return Sequence([D.submodule((V.basis_matrix() * B).row_module(R),\n check=False) for V, 
_ in E],\n cr=True, check=False)", "def transMatrix( source=None ):\n if source is None:\n return None,None\n else:\n (x,y,z) = source[:3]\n if x == y == z == 0.0:\n return None, None \n return tmatrixaccel.transMatrix( x,y,z ),tmatrixaccel.transMatrix( -x, -y, -z )", "def apply_dof_transformation_to_transpose_hexahedron(\n entity_transformations, entity_dofs, data, cell_info\n):\n apply_dof_transformation_to_transpose(3, 12, 6, entity_transformations, entity_dofs,\n data, cell_info, List([\"quadrilateral\"] * 6))", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def apply_dof_transformation_to_transpose_tetrahedron(\n entity_transformations, entity_dofs, data, cell_info\n):\n apply_dof_transformation_to_transpose(3, 6, 4, entity_transformations, entity_dofs,\n data, cell_info, List([\"triangle\"] * 4))" ]
[ "0.6168927", "0.6069243", "0.6052449", "0.6051412", "0.5990401", "0.5962151", "0.59120136", "0.58908314", "0.58383477", "0.57767695", "0.57475996", "0.57253575", "0.572462", "0.57190764", "0.56800544", "0.56527287", "0.56141245", "0.55945146", "0.55739355", "0.55634975", "0.5552715", "0.5529159", "0.5519686", "0.55169845", "0.5513936", "0.551313", "0.55013067", "0.5492254", "0.5454311", "0.5429849", "0.54278743", "0.5422676", "0.5417856", "0.541286", "0.5381523", "0.53808635", "0.53789186", "0.53766835", "0.53591305", "0.5357787", "0.53473127", "0.53461856", "0.534435", "0.53440803", "0.53327626", "0.53291774", "0.5323125", "0.531629", "0.53148365", "0.5313858", "0.5313351", "0.5306556", "0.5305269", "0.5297729", "0.5292263", "0.5289725", "0.52856994", "0.5283848", "0.52803975", "0.5279951", "0.5277443", "0.5271978", "0.5268458", "0.52663577", "0.5262589", "0.5261626", "0.52606726", "0.52603066", "0.525023", "0.5249121", "0.5247598", "0.5246055", "0.52440584", "0.52313423", "0.522662", "0.52137387", "0.5208097", "0.5184859", "0.51843697", "0.5183714", "0.51796186", "0.5179416", "0.5175275", "0.5174405", "0.5169866", "0.5164483", "0.5162318", "0.5161257", "0.51564103", "0.5141323", "0.513399", "0.51247513", "0.5124751", "0.5115186", "0.5106926", "0.50978947", "0.507265", "0.50703245", "0.5065946", "0.50622135", "0.50619614" ]
0.0
-1
Ask a yes no question
def ask_yes_no(question):
    answer = None
    while answer not in ("y", "n"):
        answer = input(question).lower()
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ask_Yes_no(question, **kwargs):\n return ask_yes_no(question, default=\"y\", **kwargs)", "def ask_yes_No(question, **kwargs):\n return ask_yes_no(question, default=\"n\", **kwargs)", "def ask_yes_no(question):\r\n\tresponse = None\r\n\twhile response not in (\"y\", \"n\"):\r\n\t\tresponse = input(question).lower()\r\n\treturn response", "def ask_yes_no(text):\n if text.strip()[0] == 'n' or text.strip()[0] == 'N':\n return False\n else:\n return True", "def ask_yes_no(message=\"\", title=None):\n return dialog(\"ask_yes_no\", message=message, title=title)", "def eval_y_n(self, question):\n answer = raw_input(question + \" [y/n] : \")\n return answer.lower() in ['y', 'yes']", "def _yes_no_select(question):\n while True:\n response = input(question + \" [y/n] \")\n if response in [\"y\", \"yes\"]:\n return True\n elif response in [\"n\", \"no\"]:\n return False\n else:\n print(\"\\nPlease select y or n\\n\")", "def confirm_yes():\r\n confirm = raw_input(\"Enter 'yes' to confirm: \")\r\n if confirm == 'yes':\r\n return True\r\n return False", "async def ask_yes_or_no(question: str, threshold: float = 0.75) -> bool:\n return classify_yes_no(str(await ask(question)), threshold=threshold)", "def ask_yesno(prompt):\n more = input(prompt)\n while more not in [\"y\", \"n\"]:\n print(\"I beg your pardon!\")\n more = input(prompt)\n return more == 'y'", "def prompt_yes_no(message, color):\n\tquestions = [inquirer.List('choice',\n\t message=color + Style.BRIGHT + message + Fore.BLUE,\n\t choices=[' Yes', ' No'],\n\t ),\n\t ]\n\n\tanswers = inquirer.prompt(questions)\n\treturn answers.get('choice').strip().lower() == 'yes'", "def ask_yesno(title='Question', message=''):\n if not isinstance(title, string_types):\n raise TypeError('ask_yesno() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_yesno() message must be a string.')\n return _get_app().ask_yesno(title, message)", "def query_yes_no(self, question, default=None):\r\n valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}\r\n if default is None:\r\n prompt = ' [Y/N] '\r\n elif default == 'yes':\r\n prompt = ' [Y/N] '\r\n elif default == 'no':\r\n prompt = ' [Y/N] '\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n if choice in valid:\r\n return valid[choice]\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")\r\n\r\n return", "def confirm():\n answer = \"\"\n while answer not in [\"y\", \"n\"]:\n answer = input(\"OK with that [Y/N]? 
\").lower()\n return answer == \"y\"", "def test_ask_yes_no_1(self, input_mock):\n response = basic.ask_yes_no()\n self.assertTrue(response)", "def confirmation(self, question, answer):\n confirm_flag = False\n while confirm_flag not in ['y', 'n']:\n confirm_flag = raw_input(question + ' [y/n]: ')\n if confirm_flag == 'y':\n print answer\n elif confirm_flag == 'n':\n print 'The user cancel the operation'\n exit()\n else:\n print 'The entry is not valid, please enter y or n.'\n return True", "def yes_or_no_question(self, text: Optional[str] = \"J / N: \") -> bool:\n while True:\n choice = input(text)\n\n try:\n return self.yes_no_answer_choices[choice]\n\n except KeyError:\n self.error(f\"Ungültige Auswahl '{choice}'.\")\n print(\"Bitte erneut eingeben.\")\n continue", "def ask(self, question):\n if self.options.yes:\n return True\n\n result = False\n while True:\n print(question + ' [y/n] ')\n response = sys.stdin.readline()\n if response:\n if response[0].lower() == 'y':\n result = True\n break\n elif response[0].lower() == 'n':\n break\n print('Please type \"y\" for yes or \"n\" for no')\n return result", "def confirm(msg=\"\"):\n answer = \"\"\n if not msg: msg = \"OK to continue\"\n while answer not in [\"y\", \"n\"]:\n answer = input(msg+\" [Y/N]? \").lower()\n return answer == \"y\"", "def AskYesNo(question, title=''):\n\n # build the message dialogue\n dial = wx.MessageDialog(None, question, title,\n wx.YES_NO|wx.NO_DEFAULT|wx.ICON_QUESTION)\n\n # run it and get the answer/event\n ret = dial.ShowModal()\n\n if (ret == wx.ID_YES): # process user answer\n return True\n else:\n return False", "def confirm(msg: str) -> bool:\n res = input(msg + \" (Y/n) > \")\n if res == 'Y' or res == 'y' or res == 'yes' or res == 'Yes' or res == \"\":\n return True\n return False", "def query_yes_no(self,question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\", \"no\":\"no\", \"n\":\"no\"}\n\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def ask_user( prompt ):\n answer = raw_input( prompt )\n if answer.lower() in [\"y\",\"yes\"]:\n return True\n else:\n return False", "def test_ask_yes_no_2(self, input_mock):\n response = basic.ask_yes_no()\n self.assertFalse(response)", "def read_yes_no(prompt):\n ans = input(str(prompt) + ' [Y/n] ').lower()\n if ans in ['', 'y', 'yes']:\n return True\n else:\n return False", "def yesno(question, title=None, bitmap=None, yes=None, no=None, checkbox=None, checked=None):\n\n if title is None:\n title = _('Yes or no?')\n if yes is None:\n yes = _(\"Okay\")\n if no is None:\n no = _(\"Cancel\")\n if checkbox is None:\n checkbox = _(\"Apply to all\")\n\n return msg_dialogs.promptmsg(question, title, bitmap, yes, no, checkbox, checked)", "def ask(prompt):\n\n return renpy.exports.invoke_in_new_context(renpy.store.layout.yesno_prompt, None, prompt)", "def affirmative(msg):\n ans = raw_input(msg + \" (enter 'y' or 'Y' for yes): \")\n if ans in ('y', 'Y'):\n return True\n else:\n return False", "def query_yes_no(question, default=True):\n yes_list = [\"yes\", \"y\"]\n no_list = [\"no\", \"n\"]\n\n 
default_dict = { # default => prompt default string\n None: \"[y/n]\",\n True: \"[Y/n]\",\n False: \"[y/N]\",\n }\n\n default_str = default_dict[default]\n prompt_str = \"%s %s \" % (question, default_str)\n\n while True:\n choice = input_(prompt_str).lower()\n\n if not choice and default is not None:\n return default\n if choice in yes_list:\n return True\n if choice in no_list:\n return False\n\n notification_str = \"Please respond with 'y' or 'n'\"\n print(notification_str)", "def decision(question):\n return click.confirm(question, show_default=True)", "def yes_or_no(question):\n while True:\n ques = input(question)\n if ques.lower().startswith('y'):\n return True\n elif ques.lower().startswith('n'):\n return False\n else:\n print('Y/yes or N/no? ')", "def query_yes_no(question, default=\"yes\"):\n\n valid = {\"yes\": \"yes\", \"y\": \"yes\", \"ye\": \"yes\", \"no\": \"no\", \"n\": \"no\"}\n prompt = {None: \" [y/n] \", \"yes\": \" [Y/n] \", \"no\": \" [y/N] \"}.get(default, None)\n\n if not prompt:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n reply = None\n\n while not reply:\n sys.stdout.write(colorize(question, Colors.PROMPT) + prompt)\n\n choice = input().lower()\n reply = None\n\n if default and not choice:\n reply = default\n elif choice in valid:\n reply = valid[choice]\n else:\n print_failure(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")\n\n return reply == \"yes\"", "def get_confirmation():\n inp = PInput(\"#> \")\n\n inp.add_keyword(\"yes\")\n inp.add_keyword(\"no\")\n\n inp.ask()\n ans = inp.get_input()\n\n if ans == \"yes\":\n return True\n else:\n return False", "def prompt_yes_or_no(question, def_yes=True):\n if def_yes:\n ans = raw_input(question + ' [y] or n: ')\n is_yes = ans == 'y' or ans == ''\n is_no = ans == 'n'\n if not (is_yes or is_no):\n print \"Invalid answer! Answer must be n or y or simple enter.\"\n prompt_yes_or_no(question, def_yes)\n else:\n return is_yes\n else:\n ans = raw_input(question + ' y or [n]: ')\n is_yes = ans == 'y'\n is_no = ans == 'n' or ans == ''\n if not (is_yes or is_no):\n print \"Invalid answer! Answer must be n or y or simple enter.\"\n prompt_yes_or_no(question, def_yes)\n else:\n return is_yes", "def confirm():\n answer = \"\"\n while answer not in [\"y\", \"n\"]:\n answer = input(\"OK to push to continue [Y/N]? 
\").lower()\n return answer == \"y\"", "def yesnocheck(yn):\n if yn =='y':\n return yn\n elif yn=='n':\n return yn\n else:\n print('Please enter yes (y) or no (n)')\n yn = raw_input('(y/n): ')\n yesnocheck(yn)\n return yn", "def ask_yes_r_no(prompt, retries=10, complaint='Yes or no, please!'):\n\t\twhile True:\n\t\t\tok = raw_input(prompt)\n\t\t\tif ok in ('y', 'ye', 'yes'):\n\t\t\t\treturn True\n\t\t\tif ok in ('n', 'no', 'nop', 'nope'):\n\t\t\t\treturn False\n\t\t\tretries = retries - 1\n\t\t\tif retries < 0:\n\t\t\t\traise IOError('Cannot understand your option, so shave yourself the rest of the yak.')\n\t\t\tprint complaint", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\r\n \"no\":False, \"n\":False}\r\n if default == None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\r\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\r\n \"no\":False, \"n\":False}\r\n if default == None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\r\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\", force=False):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n if not force:\n choice = raw_input().lower()\n else:\n choice = \"yes\"\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif 
default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def yes_or_no(prompt):\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n\n return response", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\r\n \"(or 'y' or 'n').\\n\")", "def get_yes_no(question):\n complete_question = question + \" ([Y]es/[N]o): \"\n\n answer = get_verified_input(prompt=complete_question, verify_by_func=is_yes_or_no)\n\n # returns True is yes, False if no\n return is_yes_or_no(answer, check_no=False)", "def query_yes_no(question, default=None):\n valid = {\"yes\": True, \"no\": False}\n if default is None:\n prompt = \" [yes/no] \"\n elif default == \"yes\":\n prompt = \" [YES/no] \"\n elif default == \"no\":\n prompt = \" [yes/NO] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == \"\":\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no'.\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while 1:\n sys.stdout.write(question + prompt)\n if sys.version_info[0]==2:\n choice = raw_input().lower()\n elif sys.version_info[0]>2:\n choice = input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def Confirm(self):\n self.PrintMetadata()\n answer = input(\"Continue [Y/n]? 
\").lower()\n return not answer.startswith(\"n\")", "def prompt_yes_no(question, default):\n again = 'Unknown response.'\n if default.lower() in ('y', 'yes'):\n options = '(Y/n): '\n elif default.lower() in ('n', 'no'):\n options = '(y/N): '\n else:\n raise ValueError('default must be \"y\", \"yes\", \"n\", or \"no\"')\n\n response = input(' '.join((question, options))).lower()\n while response not in ('y', 'yes', 'n', 'no', ''):\n response = input(' '.join((again, question, options))).lower()\n if response == '':\n return default\n return response", "def query_yes_no(question, default=\"yes\"):\n\tvalid = {\"yes\": True, \"y\": True, \"ye\": True,\n\t\t\t \"no\": False, \"n\": False}\n\tif default is None:\n\t\tprompt = \" [y/n] \"\n\telif default == \"yes\":\n\t\tprompt = \" [Y/n] \"\n\telif default == \"no\":\n\t\tprompt = \" [y/N] \"\n\telse:\n\t\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n\twhile True:\n\t\t# sys.stdout.write(question + prompt)\n\t\tchoice = raw_input(question + prompt).lower()\n\t\t# print(choice)\n\t\tif default is not None and choice == '':\n\t\t\treturn valid[default]\n\t\telif choice in valid:\n\t\t\treturn valid[choice]\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no' \"\n\t\t\t\t\t\t\t \"(or 'y' or 'n').\\n\")", "def handle_yes_no_input(prompt):\n user_input = input(prompt).upper()\n\n # Handling bad input\n while user_input not in [\"Y\", \"N\"]:\n user_input = input(f\"\\\"{user_input}\\\" is not a valid input. Please enter \\\"Y\\\" or \\\"N\\\": \")\n\n return user_input == \"Y\"", "def _handle_yn_btn(self, req):\n self.yes = None # initialize\n self.yes_button.setEnabled(True)\n self.no_button.setEnabled(True)\n while self.yes is None: # wait for user input\n rospy.sleep(1.)\n self.yes_button.setEnabled(False)\n self.no_button.setEnabled(False)\n return YesNoResponse(yes=self.yes)", "def y_n(ch):\r\n chs = ['yes', 'y', 'no', 'n']\r\n ch = check(ch, chs)\r\n\r\n if ch == 'yes' or ch == 'y':\r\n return True\r\n return False", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n valid_true = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return (default in valid_true.keys())\n elif choice in valid.keys():\n return (choice in valid_true.keys())\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\n \"yes\": True,\n \"y\": True,\n \"ye\": True,\n \"no\": False,\n \"n\": False\n }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question: str, default: str = \"yes\") -> bool:\n valid = {\"yes\": True,\n \"y\": True,\n \"ye\": True,\n \"no\": 
False,\n \"n\": False}\n\n if default is None:\n prompt = \" [y/n]: \"\n elif default == \"yes\":\n prompt = \" [Y/n]: \"\n elif default == \"no\":\n prompt = \" [y/N]: \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no': \")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n print question + prompt\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n print \"Please respond with 'yes' or 'no' (or 'y' or 'n').\"", "def query_yes_no(question, default=\"yes\"):\n\tvalid = {\"yes\": True, \"y\": True, \"ye\": True,\n\t\t\t \"no\": False, \"n\": False}\n\tif default is None:\n\t\tprompt = \" [y/n] \"\n\telif default == \"yes\":\n\t\tprompt = \" [Y/n] \"\n\telif default == \"no\":\n\t\tprompt = \" [y/N] \"\n\telse:\n\t\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n\twhile True:\n\t\tsys.stdout.write(question + prompt)\n\t\tchoice = raw_input().lower()\n\t\tif default is not None and choice == '':\n\t\t\treturn valid[default]\n\t\telif choice in valid:\n\t\t\treturn valid[choice]\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no' \"\n\t\t\t\t\t\t\t \"(or 'y' or 'n').\\n\")", "def test_ask_yes_no_3(self, input_mock):\n response = basic.ask_yes_no(response_attempt=3)\n self.assertIsNone(response)", "def ask_for_confirmation(prompt=\"Are you sure? 
\", default=True):\n yes, no = (\"Y\", \"n\") if default else (\"y\", \"N\")\n prompt += f\"[{yes}/{no}] \"\n\n while True:\n ans = input(prompt).lower().strip()\n if not ans:\n return default\n elif not (\"yes\".startswith(ans) or \"no\".startswith(ans)):\n print(\"Please enter yes or no.\")\n continue\n else:\n return \"yes\".startswith(ans)", "def test_ask_yes_no_4(self, input_mock):\n response = basic.ask_yes_no()\n self.assertIsNone(response)", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def ask(question):\n while True:\n query = input('{}\\n Reply (y/n) >>'.format(question))\n res = query[0].lower()\n if query == '' or not res in ['y', 'n']:\n pass\n else:\n break\n\n if res == 'y':\n return True\n else:\n return False", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default 
answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def get_confirm(self):\n self.warning('Would you like to execute[y/N]: ')\n _choice = input()\n choice = _choice.lower() if _choice else 'n'\n err_msg = \"must input yes(y)/no(n), not \" + _choice\n if not choice.startswith(('y', 'n')):\n self.error(err_msg)\n return\n if choice == 'y' or choice == 'yes':\n confirm = True\n elif choice == 'n' or choice == 'no':\n self.info(\"Nothing to do.\")\n confirm = False\n else:\n self.error(err_msg)\n confirm = None\n\n return confirm", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError( _(\"invalid default answer:\") + \" '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write( _(\"Please respond with 'yes' or 'no' \") + \"(or 'y' or 'n').\\n\")", "def confirm(force):\n if not force:\n ans = input(que(bold(\"Are you sure? 
[y/N]: \")))\n else:\n ans = 'y'\n\n return ans.lower()", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"no\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = { \"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"no\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \" \\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise 
ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def 
query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"=> Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=None):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \" \\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, '1': True,\n \"no\": False, \"n\": False, '0': False, }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default='no'):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None: \n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 
True:\n print(question, end=' ')\n sys.stdout.write(prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid: \n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n return", "def query_yes_no(question, default=None):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": 
True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default='yes'):\n valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}\n if default is None:\n prompt = ' [y/n] '\n elif default == 'yes':\n prompt = ' [Y/n] '\n elif default == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError('invalid default answer: %s' % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n if choice in valid:\n return valid[choice]\n else:\n sys.stdout.write('Please respond with \\'yes\\' or \\'no\\''\n '(or \\'y\\' or \\'n\\').\\n')", "def yes_no_dialog(self, message):\n reply = QMessageBox.question(self, \"Are you sure?\",\n message, QMessageBox.Yes, QMessageBox.Cancel)\n\n if reply == QMessageBox.Yes:\n return True\n else:\n return False", "def confirm(self, prompt=None, resp=False):\n\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def askYes(parent,message,title='',default=True):\r\n style = wx.YES_NO|wx.ICON_EXCLAMATION|((wx.NO_DEFAULT,wx.YES_DEFAULT)[default])\r\n return askStyled(parent,message,title,style)", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n print(colored(question + prompt, 'white', 'on_red', attrs=['reverse', 'blink']))\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")" ]
[ "0.8196788", "0.8141998", "0.8027091", "0.79333156", "0.78017837", "0.7636633", "0.75908566", "0.75773454", "0.7574332", "0.7567365", "0.75321203", "0.7493373", "0.7484439", "0.74384576", "0.7435376", "0.7426852", "0.7407709", "0.73929524", "0.7383109", "0.7374345", "0.7352029", "0.73406374", "0.73350585", "0.7333078", "0.7323289", "0.73005426", "0.7295534", "0.72756106", "0.7268732", "0.72200173", "0.72130936", "0.7209684", "0.7178435", "0.7172694", "0.7150949", "0.71468186", "0.71278805", "0.7126038", "0.71230453", "0.71230453", "0.71217996", "0.7110703", "0.71053505", "0.71045625", "0.7100578", "0.70970577", "0.7089852", "0.708326", "0.7081595", "0.7080372", "0.70776427", "0.70771694", "0.70564175", "0.7050474", "0.7044654", "0.7041744", "0.7034498", "0.7032745", "0.7030707", "0.70198697", "0.7019173", "0.7019148", "0.7016588", "0.7014359", "0.7014359", "0.7014359", "0.7014359", "0.7014359", "0.70142406", "0.70122886", "0.70064855", "0.7006091", "0.7002439", "0.70020366", "0.7001576", "0.70010954", "0.69941473", "0.69929415", "0.69929415", "0.69929415", "0.69929415", "0.69929415", "0.69929415", "0.69929415", "0.69929415", "0.6988077", "0.6987517", "0.6985617", "0.69851696", "0.6981514", "0.6968211", "0.6968211", "0.6968211", "0.6968211", "0.6968211", "0.6965747", "0.6963643", "0.6962298", "0.69621956", "0.6954878" ]
0.7771797
5
Clears the terminal screen.
def clear_screen():

    # Clear command as function of OS
    command = "cls" if system_name().lower()=="windows" else "clear"

    # Action
    system_call(command)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_screen() -> None:\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")", "def clear_screen():\n os.system('cls')", "def clear_screen():\n\tprint(\"\\033[H\\033[J\")", "def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "def clear_screen():\n os.system(\"cls\" if os.name == 'nt' else 'clear')", "def clear_screen():\n if os.name == 'nt':\n os.system(\"cls\")\n else:\n os.system(\"clear\")", "def _clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "def screen_clear():\n from subprocess import call\n import os\n call('clear' if os.name == 'posix' else 'cls')", "def clear_screen():\r\n if os.name in ('nt','dos'):\r\n os.system(\"cls\")\r\n elif os.name in ('linux','osx','posix'):\r\n os.system(\"clear\")\r\n else:\r\n print(\"\\n\") * 120", "def clear_terminal(self):\n os.system('clear')", "def clear_screen():\n if name == \"nt\":\n system('cls')\n else:\n system('clear')", "def clearTerminal():\r\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_screen():\n print('\\n' * TERMINAL_HEIGHT)", "def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n self.display_heading()\n self.display_empty_lines()", "def clear_console():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_console():\n os.system('cls' if os.name == 'nt' else \"clear\")", "def clear_screen(out=sys.stdout) -> None:\n ## The ANSI escape codes we use here are described e.g. on:\n # https://en.wikipedia.org/wiki/ANSI_escape_code#CSIsection\n #\n ## To sum up the escape codes used below:\n # \\x1b - Escape | Starts all the escape sequences\n # [ - Control Sequence Introducer | Starts most of the useful sequences\n # H - Cursor Position | Moves the cursor to row n, column m (default=1)\n # \\x1b - Escape | Starts all the escape sequences\n # <n> J - Erase in Display | Clears part of the screen.\n # If n is 0 (or missing), clear from cursor to end of screen.\n # If n is 1, clear from cursor to beginning of the screen.\n # If n is 2, clear entire screen (and moves cursor to upper left on DOS ANSI.SYS).\n # If n is 3, clear entire screen and delete all lines saved in the\n # scrollback buffer (this feature was added for xterm and is supported\n # by other terminal applications\n out.write(\"\\x1b[H\\x1b[2J\")", "def clearscreen():\n if os.name == 'nt':\n os.system('cls')\n elif os.name == 'posix':\n os.system('clear')\n else:\n print \"Untested OS. 
Please tell the developer you're on: %s\" % os.name \n sys.exit(0)", "def clear_console():\n import os\n clear = lambda: os.system('cls')\n clear()\n return None", "def resetTerminal():\n sys.stdout.write('\\n\\n') # add a few blank lines\n sys.stdout.flush()\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "def clear_screen():\n\n # Clear command as function of OS\n command = \"cls\" if platform.system().lower()==\"windows\" else \"clear\"\n\n # Action\n return subprocess.call(command) == 0", "def clear():\n sys.stdout.write('\\033[2J')\n sys.stdout.write('\\033[H')\n sys.stdout.flush()", "def reset_screen() -> None:\n os.system(\"clear\") if os.name == \"posix\" else os.system(\"cls\")\n print(logo)\n print(\"=\" * 80)", "def clear(self):\n # Erase current output first.\n self.erase()\n\n # Send \"Erase Screen\" command and go to (0, 0).\n output = Output(self.stdout)\n\n output.erase_screen()\n output.cursor_goto(0, 0)\n output.flush()\n\n self.request_absolute_cursor_position()", "def clear():\n\n if not CLEAR_PRINT[0]:\n try:\n if os.name == \"nt\":\n # For windows.\n os.system(\"cls\")\n\n elif os.name == \"posix\":\n # For mac/linux.\n os.system(\"clear\")\n\n else:\n # Unknown operating system, just print a newline a bunch of times.\n print(\"\\n\" * CLEAR_PRINT[1])\n\n except:\n # Can't figure out the operating system, safest bet is to just print a newline a bunch of times.\n print(\"\\n\" * CLEAR_PRINT[1])\n\n else:\n # The clearing of screen is overriden, so we just print a newline CLEAR_PRINT[1] times.\n print(\"\\n\" * CLEAR_PRINT[1])", "def clearConsole():\r\n\r\n command = 'clear' # command for console clearing\r\n if os.name in ('nt', 'dos'): # if the machine is running on Windows, then use cls\r\n command = 'cls'\r\n os.system(command) # othen than Windows, use clear\r", "def clearScreen():\n dislin.erase()", "def clear_screen(self) -> \"Cursor\":\n self._output.write(\"\\x1b[2J\")\n\n return self", "def clearScreen():\n pass", "def clear() -> None:\n\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear() -> None:\n\n os.system('cls' if os.name == 'nt' else 'clear')", "def command_clearterm():\n subprocess.call(\"reset\")", "def clear(self) -> None:\n self.screen.clear()", "def clear():\n print(chr(27) + \"[2J\")\n print(\"\\033[0;0H\")", "def clear_screen(self,):\n sys.stdout.write('\\033[2J')\n sys.stdout.write('\\033[H')\n sys.stdout.flush()\n print \"\\n\\t\\tDo To - %s\\n\\n\" % self.user", "def clear():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear():\r\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear(screen):\n screen.clear()\n screen.refresh()", "def clear_player_screen():\n print('\\n' * get_terminal_size().lines, end='')\n\n return None", "def clear():\n if platform.system() == \"Windows\":\n os.system('cls')\n elif platform.system() == \"Linux\":\n os.system('clear')", "def clear_screen(self):\n logging.info(\"Clearing screen for console_ip %s port %s\", self.console_ip, self.act_port)\n console = pexpect.spawn('telnet %s %s'%(self.console_ip,self.act_port))\n console.logfile = self.log\n console.send('\\003')\n console.close()\n\n if self.stnd_port and self.stnd_console_ip:\n logging.info(\"Clearing screen for console_ip %s port %s\", self.stnd_console_ip, self.stnd_port)\n console = pexpect.spawn('telnet %s %s'%(self.stnd_console_ip,self.stnd_port))\n console.logfile = self.log\n console.send('\\003')\n console.close()\n 
logging.info('done clear screen') \n return", "def clear():\n sub.call('cls', shell=True)", "def clear():\n\n # windows \n if os.name == \"nt\": \n _ = os.system(\"cls\") \n # mac and linux\n else: \n _ = os.system(\"clear\")", "def clear_console(cls):\n print('\\n' * 200)", "def clear(screen=None):\n if screen is None:\n screen = lv.scr_act()\n screen.clean()", "def clear_screen(self) -> None:\n assert self.screen is not None\n self.screen.clear()\n self.refresh_screen()", "def clear_screen(self):\n self.ClearAll()\n self.new_prompt(self.input_prompt_template.substitute(\n number=(self.last_result['number'] + 1)))", "def clearscreen(numlines=10):\n import os\n if os.name == \"posix\":\n # Unix/Linux/MacOS/BSD/etc\n os.system('clear')\n elif os.name in (\"nt\", \"dos\", \"ce\"):\n # DOS/Windows\n os.system('CLS')\n else:\n # Fallback for other operating systems.\n print '\\n' * numlines", "def clear():\r\n if name == 'nt':\r\n _ = system('cls')\r\n else:\r\n _ = system('clear')", "def clear():\n # TODO: this should actually create a stack of output so I can test each screen\n lines.clear()", "def ClearDisplay():\n display.fill(0)", "def clear():\n if os.name == 'nt': \n os.system('cls') \n else: \n os.system('clear')", "def clear():\n try:\n try:\n # For Macs and Linux\n os.system('clear');\n except:\n # For Windows REPORTED BUG: Sometimes does not work on 64 bit Windows\n os.system('cls');\n except:\n # If nothing else works, a hacky, non optimal solution\n for i in range(50): print(\"\")", "def clear(): \n if os.name == \"nt\":\n os.system(\"cls\")\n else:\n os.system(\"clear\")", "def clear():\n\n os.system(\"clear\")", "def clear():\n clear_output()", "def clear_screen(self):\n if self.x:\n self.move_cur_up((self.prev_x+1)/self.get_col_width())\n self.clear_line(self.get_num_lines(self.prev_lines) +\n self.get_num_lines(['>' + self.prev_str + ' ']))\n #time.sleep(2)", "def clear():\n\tglobal _s\n\t_s.screen.fill(_s.back)\n\t_s.tab(0,0)\n\t_flip()", "def console_clear(wait_time):\n\n sleep(wait_time) # Produces a delay based on input passed through console_clear()\n\n # These commands only work in the terminal\n try:\n system(\"cls\") # Clears console for users on Windows operating system\n\n except:\n system(\"clear\") # Clears console for users on Mac and Linux operating systems", "def limpa_console() -> None:\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "def do_clear(self, arg):\r\n if platform.system == \"Windows\":\r\n os.system(\"cls\")\r\n else:\r\n os.system(\"clear\")", "def cls(self):\n os.system('clear')", "def clear():\n if \"Windows\" in system():\n call(\"cls\")\n else:\n call(\"clear\")", "def clean_up_terminal(self) -> None:\n if self.stdscr:\n # Disable the Keypad mode\n self.stdscr.keypad(False)\n # Renable caracters echoing\n curses.echo()\n # Disable the interrupts\n curses.nocbreak()\n # Restore the terimnal to it's orginial operating mode\n curses.endwin()", "def cls():\n os.system(\"cls\")\n os.system(\"clear\") # clear the moniter function", "def reset():\n if os.name == \"posix\": #In linux\n os.system(\"clear\")\n elif os.name == (\"ce\", \"nt\", \"dos\"): #In windows\n os.system(\"cls\")", "def _clear(self):\n self._calculate_bar_width()\n # sys.stdout.write(\"\\033[K\")\n # to fix bug when logging to console\n print(\" \" * self._tw, end='\\r')\n # sys.stdout.write(\"\\033[K\")", "def wipe(self):\n self.console.clear()", "def clear(self):\n self.command(_LCD_CLEARDISPLAY)\n self._cursor_pos = (0, 0)\n self._content = [[0x20] * 
self.cols for _ in range(self.rows)]\n time.sleep(2*MILLISECOND)", "def clear():\n print(black, end='')", "def clear(self):\n self.command(self.LCD_CLEARDISPLAY)\n self._cursor_pos = (0, 0)\n self._content = [[0x20] * self._cols for _ in range(self._rows)]\n self._msleep(2)", "def _do_clear(self):\n print()\n print()\n console.rule()\n os.system(\"cls\" if os.name in (\"nt\", \"dos\") else \"clear\")\n self.history_manager.remove_items(n=1)", "def consoleClear() -> None:\n\t\tLogging._console.clear()", "def clear_display(self) -> None:\n pass", "def cls():\n # TODO: Check if this covers all systems\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_output():\n print(\"\\n\" * 20)", "def clear_console(self, event):\n if self.debug:\n print(\"Clearing...\", str(event), event)\n self.view.console_panel.clear()", "def clearScreen(self):\n background = pygame.Surface(self.getSize())\n background = background.convert()\n background.fill((0, 0, 0))\n self.screen.blit(background, (0, 0))", "def clear(self) -> None:\n\n self.screen.fill(self.bg)", "def term():\n curses.endwin()\n unicornhathd.off()", "def reset_screen(y_position=0, x_position=0, reset=True):\n if os.name is not 'nt':\n # Send an ansi clear\n if reset:\n print('\\033[2J')\n # Set the cursor\n print('\\033[{:d};{:d}H'.format(y_position, x_position))", "def clear_console(self):\n try:\n self.__clear_telnet_port(self.console_ip, self.act_port)\n if self.stnd_port and self.stnd_console_ip:\n self.__clear_telnet_port(self.stnd_console_ip, self.stnd_port)\n except:\n logging.info(\"Some issue in clearing console\")\n return", "def do_clear(self, line):\n\t if os.name == 'nt':\n\t os.system('cls')\n\t else:\n\t os.system('clear')", "def clear_screen(self, w_height):\r\n\r\n print(\"\\n\"*(w_height))", "def clean():\n if system() == 'Windows':\n os.system('cls')\n else:\n os.system('clear')", "def reset_terminal():\n if not mswin:\n subprocess.call([\"tset\", \"-c\"])", "def reset_color():\n sys.stdout.write(\"\\033[0m\")", "def clear_output(self) -> \"Cursor\":\n self._output.write(\"\\x1b[0J\")\n\n return self", "def reset_term_colors():\n sys.stdout.write(ENDC)", "def clear(self):\n pygame.draw.rect(self.screen,BLACK,(0,0,WINDOWWIDTH,\n WINDOWHEIGHT))\n pygame.display.update()", "def clear_screen():\n print('now printing 25 lines')\n for _ in range(2):\n nine_lines()\n for _ in range(2):\n three_lines()\n new_line()\n return", "def test_clear_windows(self):\n with mock.patch(\"hangman.cli.screen.os.system\") as mock_system:\n hangman.cli.screen.Screen.clear()\n mock_system.assert_called_with(\"cls\")", "def clear_main(self):\n\n if self.terminate:\n return\n\n self.windows['MAIN'].erase()\n self.windows['MAIN'].border(' ', ' ',\n curses.ACS_HLINE, curses.ACS_HLINE,\n curses.ACS_HLINE, curses.ACS_HLINE,\n curses.ACS_HLINE, curses.ACS_HLINE)" ]
[ "0.89045364", "0.8903247", "0.8894391", "0.8885713", "0.8885713", "0.88519347", "0.88519347", "0.88374126", "0.8820877", "0.8814398", "0.8805836", "0.87480396", "0.874027", "0.86738855", "0.866956", "0.8638429", "0.85816944", "0.84902024", "0.83856225", "0.8375647", "0.8366166", "0.83394", "0.82687205", "0.8191035", "0.8182261", "0.81524175", "0.81486773", "0.8146845", "0.8110288", "0.8109352", "0.8094877", "0.80254596", "0.8024008", "0.7912952", "0.7912952", "0.78812253", "0.7874313", "0.78557724", "0.78459924", "0.782456", "0.782456", "0.7822496", "0.77965826", "0.7767428", "0.7730905", "0.76861465", "0.7652012", "0.7638143", "0.76320016", "0.7615118", "0.760055", "0.7581962", "0.7581512", "0.755217", "0.7539026", "0.7509291", "0.75091857", "0.74966735", "0.7486009", "0.7442636", "0.7442593", "0.7395101", "0.7369681", "0.7299393", "0.72843176", "0.7280678", "0.72781986", "0.72699934", "0.7254459", "0.72468454", "0.7238243", "0.7224971", "0.7222553", "0.7211595", "0.71534383", "0.71246845", "0.71152866", "0.7066757", "0.7053929", "0.7032904", "0.6993472", "0.6976116", "0.69097674", "0.69097024", "0.6907151", "0.690599", "0.68995655", "0.6863899", "0.683866", "0.68311816", "0.6830095", "0.6814514", "0.6812274", "0.68053025", "0.6764987", "0.67207485", "0.66669893", "0.66066855" ]
0.87332195
15
Read and preprocess an image with data augmentation (random transform).
def read_for_training(p, augmentation=False):
    img = imread(TRAIN + p, mode='RGB')
    msk = img
    if mode == 'background':
        data = {'image': img}
    elif mode == 'instance' or mode == 'code':
        msk = imread(TRAIN_MASK + p.replace('.jpg', '.png'))
        data = {'image': img, 'mask': msk}
    if augmentation:
        data_aug = strong_aug()(**data)
        img = data_aug['image']
        if 'mask' in data_aug:
            msk = data_aug['mask']
    if mode == 'instance' or mode == 'code':
        img[~msk.astype(np.bool)] = 0
    img, msk = size_normalization(img, msk)
    if mode == 'code':
        img = encode(img, msk)
    return img, msk
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_", "def data_augmentation(image, aug):\n if (aug == \"random_crop\") and (random.randint(0,1)):\n image = random_crop(image) \n if (aug == \"random_rotation\") and (random.randint(0,1)): \n image = random_rotation(image) \n if (aug == \"random_flip\") and (random.randint(0,1)): \n image = random_flip(image)\n if (aug == \"affine_transformation\") and (random.randint(0,1)): \n image = affine_transformation(image)\n if (aug == \"random_gaussian_noise\") and (random.randint(0,1)): \n image = random_gaussian_noise(image)\n if (aug == \"random_erasing\") and (random.randint(0,1)): \n image = random_erasing(image) \n return image", "def preprocess(self):\n meta_file_path = os.path.join(database_directory, 'data.txt')\n meta = pd.read_csv(meta_file_path, delimiter=' ', header=None)\n meta = meta[meta[0] != '45567.jpg'] # Corrupt image.\n meta.to_pickle(os.path.join(database_directory, 'meta.pkl'))\n for file_name in meta.iloc[:, 0].values:\n if file_name.endswith('.jpg'):\n file_path = os.path.join(database_directory, file_name)\n image = imageio.imread(file_path).astype(np.uint8)\n image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),\n preserve_range=True)\n image = image.transpose((2, 0, 1))\n np.save(file_path.replace('.jpg', '.npy'), image)", "def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)", "def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label", "def caffe_preprocess(img):\n out = numpy.copy(img)\n out -= IMAGE_256_MEAN[None, None, :]\n out = out[:, :, [2, 1, 0]] # swap channel from RGB to BGR\n return out", "def preprocess(img):\n # standard mean and std for the model\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n # resize\n img = img.resize(size = (224, 224))\n # transforms to numpy\n img = np.array(img, dtype = np.float64)\n # Mean and Std\n img = (img - mean)/std\n # transpose [channels first]\n img = img.transpose((2, 0, 1))\n # conver to Tensor\n img = torch.from_numpy(img)\n return img", "def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)", "def load_and_prep_image(img, img_shape=224):\n\n # Decode the read file into a tensor & ensure 3 colour channels \n # (our model is trained on images with 3 colour channels and sometimes images have 4 colour channels)\n img = tf.image.decode_image(img, channels=3)\n\n # Resize the image (to the same size our model was trained on)\n img = tf.image.resize(img, size = [img_shape, img_shape])\n\n # Rescale the image (get all values between 0 and 1)\n img = img/255.\n return img", "def _load_preprocess_image(self, image_file):\n\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image", "def chainercv_preprocess(image):\n image = skio.imread(image)\n image = image.transpose(2, 0, 1)\n return [image]", "def preprocess_example_input(input_config):\n\n input_path = input_config[\"input_path\"]\n input_shape = input_config[\"input_shape\"]\n one_img = imread(input_path)\n if \"normalize_cfg\" in input_config.keys():\n 
normalize_cfg = input_config[\"normalize_cfg\"]\n mean = np.array(normalize_cfg[\"mean\"], dtype=np.float32)\n std = np.array(normalize_cfg[\"std\"], dtype=np.float32)\n one_img = imnormalize(one_img, mean, std)\n one_img = imresize(one_img, input_shape[2:][::-1]).transpose(2, 0, 1)\n one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(True)\n (_, C, H, W) = input_shape\n one_meta = {\n \"img_shape\": (H, W, C),\n \"ori_shape\": (H, W, C),\n \"pad_shape\": (H, W, C),\n \"filename\": \"<demo>.png\",\n \"scale_factor\": 1.0,\n \"flip\": False,\n }\n\n return one_img, one_meta", "def _load_preprocess_image(self, image_file):\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image", "def preprocess_image(image, label, is_training):\n if is_training:\n # Randomly scale the image and label.\n image, label = preprocessing.random_rescale_image_and_label(\n image, label, _MIN_SCALE, _MAX_SCALE)\n\n # Randomly crop or pad a [_HEIGHT, _WIDTH] section of the image and label.\n image, label = preprocessing.random_crop_or_pad_image_and_label(\n image, label, _HEIGHT, _WIDTH, _IGNORE_LABEL)\n\n # Randomly flip the image and label horizontally.\n image, label = preprocessing.random_flip_left_right_image_and_label(\n image, label)\n\n image.set_shape([_HEIGHT, _WIDTH, 3])\n label.set_shape([_HEIGHT, _WIDTH, 1])\n print(\"seg11111111111\",image,label)\n image = preprocessing.mean_image_subtraction(image)\n\n return image, label", "def data_tf(img):\n img = crop.process(img)\n img = random_contrast.process(img)\n img = random_brightness.process(img)\n img = random_color.process(img)\n img = random_sharpness.process(img)\n if img.size[1] >= 32:\n img = compress.process(img)\n img = adjust_resolution.process(img)\n img = blur.process(img)\n img = exposure.process(img)\n # img = rotate.process(img)\n img = salt.process(img)\n img = inverse_color(img)\n img = stretch.process(img)\n return img", "def pre_analyse():\n t = transform()\n model = modified_resnet50()\n model.load_state_dict(\n torch.load(\n \"model.pth.tar\",\n map_location=torch.device(\"cpu\"),\n )[\"state_dict\"]\n )\n model.eval()\n\n def get_preds(img_path):\n \"\"\"\n Gives labelds and probabilities for a single image\n This is were we preprocess the image, using a function defined in the model class\n \"\"\"\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output\n\n return get_preds(\"image.jpg\")", "def preprocess(self, img):\n return img - np.mean(img)", "def image_preprocessing(img):\n\n # Removing parasite data (sky, trees and front of the car)\n return img[60:-20, :, :]", "def preprocess_image(image, is_training):\n if is_training:\n # Resize the image to add four extra pixels on each side.\n image = tf.image.resize_image_with_crop_or_pad(\n image, _IMAGE_SIZE + 8, _IMAGE_SIZE + 8)\n\n # Randomly crop a [_HEIGHT, _WIDTH] section of the image.\n image = tf.random_crop(image, [_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS])\n\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n return image", "def load_and_preprocess_image(path, max_dim=512):\n f = tf.io.read_file(path)\n img = 
tf.io.decode_image(f)\n img = resize_min(img, max_dim)\n img = tf.expand_dims(img, axis=0)\n img = vgg_preprocess_input(img)\n return img", "def load_image(path, preprocess=True):\n x = image.load_img(path, target_size=(H, W))\n if preprocess:\n x = image.img_to_array(x)\n x = np.expand_dims(x, axis=0)\n x = x / 255.0\n return x", "def preprocess(image):\n return image - MEAN_PIXEL", "def preprocess(image):\n image = rgb2yuv(image)\n return image", "def preprocess(img_name, quality=None):\n img = plt.imread(img_name)\n Y, X, C = img.shape\n img = img[:Y-Y%2, :X-X%2, :3]\n if quality is not None:\n img = jpeg_compress(img, quality)\n img = img_to_tensor(img).cuda().type(torch.float)\n return img", "def preprocess(file_path, model_preprocess_function):\n img = image.load_img(file_path, target_size=(224, 224))\n x = image.img_to_array(img)\n # x = np.expand_dims(x, axis=0)\n x = model_preprocess_function(x)\n return x", "def preprocess_image(image, training):\n ### YOUR CODE HERE\n\n if training:\n \n # Resize the image to add four extra pixels on each side.\n # image = tf.image.resize_image_with_crop_or_pad(image, 32 + 8, 32 + 8)\n image = np.pad(image, ((4,4),(4,4),(0,0)), 'constant')\n \n # Randomly crop a [32, 32] section of the image.\n # image = tf.random_crop(image, [32, 32, 3])\n _x_axis = np.random.randint(9)\n _y_axis = np.random.randint(9)\n image = image[_x_axis:_x_axis+32, _y_axis:_y_axis+32, :]\n \n # Randomly flip the image horizontally.\n # image = tf.image.random_flip_left_right(image)\n if np.random.randint(2) == 1:\n image = np.flip(image, axis=0)\n\n # Subtract off the mean and divide by the standard deviation of the pixels.\n # image = tf.image.per_image_standardization(image)\n mean = np.mean(image)\n adjusted_stddev = max(np.std(image), 1.0/np.sqrt(3072))\n image = (image - mean) / adjusted_stddev\n ### END CODE HERE\n\n return image", "def preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3, dct_method=\"INTEGER_ACCURATE\")\n return scale(image)", "def preprocess_image(image, training):\r\n if training:\r\n ### YOUR CODE HERE\r\n hpad = np.zeros((32,4,3))\r\n image = np.hstack((image,hpad))\r\n image = np.hstack((hpad,image))\r\n\r\n vpad = np.zeros((4,40, 3))\r\n image = np.vstack((image, vpad))\r\n image = np.vstack((vpad, image))\r\n\r\n #print(np.shape(image))\r\n # Resize the image to add four extra pixels on each side.\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Randomly crop a [32, 32] section of the image.\r\n # HINT: randomly generate the upper left point of the image\r\n rx = np.random.randint(8)\r\n ry = np.random.randint(8)\r\n crp_img = image[rx:rx+32,ry:ry+32,:]\r\n #print(np.shape(crp_img))\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Randomly flip the image horizontally.\r\n # for i in range(crp_img.shape[0]):\r\n # crp_img[i] = np.fliplr(crp_img[i])\r\n rf = np.random.randint(2)\r\n if(rf == 0):\r\n crp_img = np.fliplr(crp_img)\r\n #print(np.shape(crp_img))\r\n image = crp_img\r\n\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Subtract off the mean and divide by the standard deviation of the pixels.\r\n cmean = []\r\n cstd = []\r\n for i in range(np.shape(image)[2]):\r\n arr = image[:,:,i]\r\n cmean = np.mean(arr)\r\n cstd = (np.std(arr))\r\n lfn = lambda x : (x-cmean)/cstd\r\n image[:,:,i] = lfn(arr)\r\n #print(np.shape(image))\r\n\r\n ### YOUR CODE HERE\r\n\r\n return image", "def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] 
= self.transformer.preprocess('data', im)", "def preprocess(img):\n if img.ndim != 3:\n raise TypeError('bad ndim of img')\n if img.dtype != np.uint8:\n raise TypeError('bad dtype of img')\n img = cv2.resize(img, (224, 224))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.astype(np.float32)\n img *= (2.0/255) # normalize to: 0.0~2.0\n img -= 1.0 # subtract mean to make it: -1.0~1.0\n img = np.expand_dims(img, axis=0)\n return img", "def _preprocess_image(self, image_raw):\n\n image = tf.io.decode_raw(image_raw, tf.float64)\n\n return image * self.rescale", "def load_image(img, image_dir, grab_mean_std, preprocess=True, H=180, W=180):\r\n img_path = os.path.join(image_dir,img)\r\n mean, std = grab_mean_std\r\n x = image.load_img(img_path, target_size=(H, W))\r\n x = np.array(x)\r\n if preprocess:\r\n x = x - mean\r\n x = x / std\r\n x = np.expand_dims(x, axis=0)\r\n return x", "def _preprocess_image(self, image_raw):\n\n image = tf.io.decode_raw(image_raw, tf.float64)\n \n if self.rescale is not None:\n image_out = image * self.rescale\n else:\n image_out = image\n\n return image_out", "def process_image(image_path):\n IMG_SIZE = 224 # Size of images used for training\n IMG_MEAN = [0.485, 0.456, 0.406]\n IMG_SDEV = [0.229, 0.224, 0.225]\n\n # Load PIL image\n image = Image.open(image_path)\n\n # Resize to 256 max dim\n if image.size[0] >= image.size[1]:\n image.thumbnail((256, image.size[1] * 256 // image.size[0]))\n else:\n image.thumbnail((image.size[0] * 256 // image.size[1], 256))\n\n # Center crop\n image = image.crop((\n (image.size[0] - IMG_SIZE) // 2,\n (image.size[1] - IMG_SIZE) // 2,\n (image.size[0] + IMG_SIZE) // 2 ,\n (image.size[1] + IMG_SIZE) // 2))\n # Convert to np.array and rescale color channels to 0-1\n image = np.array(image) / 255\n # Normalize image\n image = (image - np.array(IMG_MEAN)) / np.array(IMG_SDEV)\n # Rearrange to make color channel first dimension\n image = image.transpose(2, 0, 1)\n # Convert to toch.FloatTensor\n image_tensor = torch.from_numpy(\n np.expand_dims(image, axis=0)).type(torch.FloatTensor)\n\n return image_tensor", "def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img", "def preprocess(path, img_w, img_h):\n #print(path)\n img = cv2.imread(path)\n #print(img.shape)\n #resizing the image to particular size (64, 128, 3)\n img = fix_size(img, img_w, img_h)\n #print(img.shape)\n \n #assigining values less than zero to zer0 and greater than zero to 1\n img = np.clip(img, 0, 255)\n\n #changing the interger to more useful and complex integer\n img = np.uint8(img)\n\n #convert an image to one color space to another\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n #chaging the values datatype to float\n img = img.astype(np.float32)\n\n #normalization\n img /= 255\n return img", "def augment(im_path):\n # change directory to toplevel of repo (parent of augmentation)\n os.chdir(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])\n\n im_name, im_ext = os.path.splitext(im_path)\n if im_path not in os.listdir(\"data/raw\"):\n raise FileNotFoundError(f\"{im_path} could not be found in the list of raw images\")\n\n if im_name + \".json\" not in os.listdir(\"data/corrected\"):\n raise FileNotFoundError(f\"{im_name} has not been labelled yet! 
(no file '{im_name}.json' in corrected)\")\n\n with open(f\"data/corrected/{im_name}.json\") as read_file:\n im_label = json.loads(read_file.read(-1))\n persp = np.float32(im_label[\"perspective\"])\n\n im: Image.Image = Image.open(f\"data/raw/{im_path}\")\n # downscale image to reasonable height\n scale_factor = 500 / im.height\n persp = persp * scale_factor\n im.thumbnail([1000000, 500])\n im_cv = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)\n\n # determine crop box\n crop_amount = (im.width - 500)\n left_crop = random.randint(crop_amount//4, 3 * crop_amount // 4)\n # left_crop = crop_amount//2\n right_crop = crop_amount - left_crop\n box = [\n left_crop,\n 0,\n im.width - right_crop,\n im.height\n ]\n\n # warp perspective\n # basic way: add gaussian noise to the 4 corner points\n warped_persp = persp.copy()\n for i in range(4):\n for j in range(2):\n v = warped_persp[i][j]\n v += random.gauss(0, 5)\n # ensure none of the perspective points will fall outside the cropped image\n v = max(box[j] + 5, v)\n v = min(box[j+2] - 5, v)\n warped_persp[i][j] = v\n\n matrix = cv2.getPerspectiveTransform(persp, warped_persp)\n warped_im = cv2.warpPerspective(im_cv, matrix, (im.width, im.height))\n warped_im = Image.fromarray(cv2.cvtColor(warped_im, cv2.COLOR_BGR2RGB))\n\n # run crop on warped image\n warped_im = warped_im.crop(box)\n # adjust warped coordinates according to crop\n for i in range(4):\n warped_persp[i][0] -= box[0]\n warped_persp[i][1] -= box[1]\n\n # scale down to final size\n warped_im = warped_im.resize((256, 256))\n for i in range(4):\n warped_persp[i][0] *= 256 / 500\n warped_persp[i][1] *= 256 / 500\n\n # adjust image colour balance, saturation and contrast\n warped_im = ImageEnhance.Color(warped_im).enhance(random.uniform(0.9, 1.2))\n warped_im = ImageEnhance.Contrast(warped_im).enhance(random.uniform(0.8, 1.2))\n warped_im = ImageEnhance.Brightness(warped_im).enhance(random.uniform(0.8, 1.2))\n\n # adjust image temperature\n # thanks to Mark Ransom (https://stackoverflow.com/a/11888449)\n temp_r, temp_g, temp_b = random.choice(KELVIN_TABLE)\n convert_matrix = (temp_r / 255.0, 0.0, 0.0, 0.0,\n 0.0, temp_g / 255.0, 0.0, 0.0,\n 0.0, 0.0, temp_b / 255.0, 0.0)\n warped_im = warped_im.convert(\"RGB\", convert_matrix)\n\n # add noise\n noise_strength = random.uniform(5, 10)\n warped_im_arr = np.float64(np.array(warped_im))\n warped_im_arr += np.random.normal(0, noise_strength, warped_im_arr.shape)\n warped_im_arr = np.clip(warped_im_arr, 0, 255)\n warped_im = Image.fromarray(np.uint8(warped_im_arr))\n\n fname = f\"{im_name}-{hex(random.randint(2**20, 2**24))[2:]}\"\n warped_im.save(f\"data/augmented/{fname}{im_ext}\")\n with open(f\"data/augmented/{fname}.json\", \"w\") as write_file:\n data = {\n \"darts\": im_label[\"darts\"],\n \"perspective\": warped_persp.tolist()\n }\n write_file.write(json.dumps(data))\n return warped_im, warped_persp", "def preprocess(self, image):\n return img_to_array(image, data_format=self.dataFormat)", "def preprocess(path, scale=3):\n image = imread(path, is_grayscale=True)\n # Must be normalized\n image = (image-127.5 )/ 127.5 \n input_ = scipy.ndimage.interpolation.zoom(input_, (scale/1.), prefilter=False)\n return input_", "def process_image(image_path):\n with Image.open(image_path) as image:\n transform = test_transforms()\n image = transform(image).numpy()\n\n return image", "def preprocess_image(image, model_image_size):\n #resized_image = cv2.resize(image, tuple(reversed(model_image_size)), cv2.INTER_AREA)\n resized_image = 
letterbox_resize(image, tuple(reversed(model_image_size)))\n image_data = np.asarray(resized_image).astype('float32')\n image_data = normalize_image(image_data)\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n return image_data", "def data_augmentation(self, img):\n new_img = img.astype(float)\n # random brightness - the mask bit keeps values from going beyond (0,255)\n value = np.random.randint(-28, 28)\n if value > 0:\n mask = (new_img[:, :, 0] + value) > 255\n if value <= 0:\n mask = (new_img[:, :, 0] + value) < 0\n new_img[:, :, 0] += np.where(mask, 0, value)\n # random shadow - full height, random left/right side, random darkening\n h, w = new_img.shape[0:2]\n mid = np.random.randint(0, w)\n factor = np.random.uniform(0.6, 0.8)\n if np.random.rand() > .5:\n new_img[:, 0:mid, 0] *= factor\n else:\n new_img[:, mid:w, 0] *= factor\n return (new_img.astype(np.uint8))", "def read_img(filename, use_flat_32_type, one_channel, flip):\r\n\r\n if one_channel:\r\n img = cv2.imread(filename, -1)\r\n else:\r\n img = cv2.imread(filename)\r\n if img is None:\r\n print('in conv_data_generator.py - read_img function - image is None ; filename=', filename)\r\n return img\r\n if use_flat_32_type & (img is not None):\r\n img = img.astype(np.float32)\r\n if img.shape[:2] == (288, 512):\r\n if flip:\r\n img = cv2.flip(img, 1)\r\n return img\r\n else:\r\n print(\"something is strange here - input does not follow the normal habbit - please check or cvhange the code according to input size\")\r\n return False", "def preprocess_image(self, img_path):\n im = Image.open(img_path)\n im = im.resize((IM_WIDTH, IM_HEIGHT))\n im = np.array(im) / 255.0\n \n return im", "def load_and_process_image(self, im_path):\n image = Image.open(im_path).convert('RGB')\n image = transforms.ToTensor()(image)\n image = 2 * image - 1\n return image", "def get_preprocessed_image(file_name):\n\n im = np.array(Image.open(file_name)).astype(np.float32)\n assert im.ndim == 3, 'Only RGB images are supported.'\n im = im - _IMAGENET_MEANS\n im = im[:, :, ::-1] # Convert to BGR\n img_h, img_w, img_c = im.shape\n assert img_c == 3, 'Only RGB images are supported.'\n if img_h > 500 or img_w > 500:\n raise ValueError('Please resize your images to be not bigger than 500 x 500.')\n\n pad_h = 500 - img_h\n pad_w = 500 - img_w\n im = np.pad(im, pad_width=((0, pad_h), (0, pad_w), (0, 0)), mode='constant', constant_values=0)\n return np.expand_dims(im.astype(np.float32), 0), img_h, img_w", "def preprocess(img):\n dim=(227,227)\n resize_width = 224\n resize_height = 224\n\n img=cv2.resize(img,dim)\n #img=cv2.normalize(img,None,alpha=0,beta=1,norm_type=cv2.NORM_MINMAX,dtype=cv2.CV_32F)\n img = img.astype(numpy.float32)\n\n #Preprocess image changing the RGB pixel values to\t the values the network needs\n # to do this we subtract the mean and multiply the std for each channel (R, G and B)\n # these mean and std values come from the stat.txt file that must accompany the\n # graph file for the network.\n \n img[:,:,0] = (img[:,:,0] - gNetworkMean[0])\n img[:,:,1] = (img[:,:,1] - gNetworkMean[1])\n img[:,:,2] = (img[:,:,2] - gNetworkMean[2])\n \n\n # Finally we return the values as Float16 rather than Float32 as that is what the network expects.\n cv2.imshow(\"Frame\", img)\n return img.astype('float16') #age_float_array.astype(numpy.float16)", "def process_image(image):\r\n image = random_brightness(image)\r\n image = crop_image(image)\r\n image = resize(image)\r\n return image", "def data_augmentation(im_list, mode='standard', 
tag=False, params=None, im_size=224,\n filemode='local', mean_RGB=None):\n if mean_RGB is None:\n mean_RGB = np.array([107.59348955, 112.1047813, 80.9982362])\n else:\n mean_RGB = np.array(mean_RGB)\n rot_ang = [0, 90, 180, 270]\n batch = []\n if tag:\n tag_list = im_list[:, 1]\n im_list = im_list[:, 0]\n if mode == 'minimal':\n params = {'mirror': False, 'rescale': False, 'crop_size': False}\n if mode == 'standard':\n params = {'mirror': True, 'rescale': 0.3, 'zoom': 0.3, 'crop_size': 1.}\n if mode == 'test':\n params = {'mirror': True, 'rescale': 0.1, 'zoom': 0.1, 'crop_size': .9}\n for i, filename in enumerate(im_list):\n if filemode == 'local':\n im = Image.open(filename)\n im = im.convert('RGB')\n elif filemode == 'url':\n filename = BytesIO(requests.get(filename).content)\n im = Image.open(filename)\n im = im.convert('RGB')\n if params['mirror'] and np.random.random() > 0.5:\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n if params['mirror'] and tag and tag_list[i] != 'habit':\n if np.random.random() > 0.5:\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n rot = np.random.choice(rot_ang)\n if rot == 90:\n im = im.transpose(Image.ROTATE_90)\n if rot == 180:\n im = im.transpose(Image.ROTATE_180)\n if rot == 270:\n im = im.transpose(Image.ROTATE_270)\n if params['rescale']:\n rescale = params['rescale']\n new_scale = np.random.uniform(low=1.-rescale, high=1.+rescale, size=2)\n im = im.resize((im.size * new_scale).astype(int))\n if params['crop_size']:\n zoom = np.random.rand() * params['zoom']\n crop_size = params['crop_size'] * (1.-zoom)\n ly, lx = im.size\n min_side = min([ly, lx])\n if crop_size == 1:\n crop_size -= 1e-10 # avoid low=high problem of randint generator\n if ly > lx:\n rand_x = np.random.randint(low=0, high=lx*(1.-crop_size))\n rand_y = np.random.randint(low=0, high=ly-lx*crop_size)\n else:\n rand_x = np.random.randint(low=0, high=lx-ly*crop_size)\n rand_y = np.random.randint(low=0, high=ly*(1.-crop_size))\n rand_xy = np.array([rand_y, rand_x])\n im = im.crop(np.concatenate((rand_xy, rand_xy+crop_size*min_side)))\n im = im.resize((im_size, im_size))\n batch.append(np.array(im)) # shape (N, 224, 224, 3)\n\n batch = np.array(batch) - mean_RGB[None, None, None, :] # mean centering\n batch = batch.transpose(0, 3, 1, 2) # shape(N, 3, 224, 224)\n batch = batch[:, ::-1, :, :] # switch from RGB to BGR\n return batch.astype(np.float32)", "def data_augmenter(image, label, shift, rotate, scale, intensity, flip):\n image2 = np.zeros(image.shape, dtype=np.float32)\n label2 = np.zeros(label.shape, dtype=np.int32)\n for i in range(image.shape[0]):\n # For each image slice, generate random affine transformation parameters\n # using the Gaussian distribution\n shift_val = [np.clip(np.random.normal(), -3, 3) * shift,\n np.clip(np.random.normal(), -3, 3) * shift]\n rotate_val = np.clip(np.random.normal(), -3, 3) * rotate\n scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale\n intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity\n\n # Apply the affine transformation (rotation + scale + shift) to the image\n row, col = image.shape[1:3]\n M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_val, 1.0 / scale_val)\n M[:, 2] += shift_val\n for c in range(image.shape[3]):\n image2[i, :, :, c] = ndimage.interpolation.affine_transform(image[i, :, :, c],\n M[:, :2], M[:, 2], order=1)\n\n # Apply the affine transformation (rotation + scale + shift) to the label map\n label2[i, :, :] = ndimage.interpolation.affine_transform(label[i, :, :],\n M[:, :2], M[:, 2], order=0)\n\n # Apply 
intensity variation\n image2[i] *= intensity_val\n\n # Apply random horizontal or vertical flipping\n if flip:\n if np.random.uniform() >= 0.5:\n image2[i] = image2[i, ::-1, :, :]\n label2[i] = label2[i, ::-1, :]\n else:\n image2[i] = image2[i, :, ::-1, :]\n label2[i] = label2[i, :, ::-1]\n return image2, label2", "def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')", "def load_image_gt(dataset, config, image_id, augment=False, augmentation=None):\n # Load image and mask\n image, class_id = dataset.load_image(image_id)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is depricated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return image, class_id", "def image_preprocess(image, image_size, mean_rgb, stddev_rgb):\n input_processor = dataloader.DetectionInputProcessor(image, image_size)\n input_processor.normalize_image(mean_rgb, stddev_rgb)\n input_processor.set_scale_factors_to_output_size()\n image = input_processor.resize_and_crop_image()\n image_scale = input_processor.image_scale_to_original\n return image, image_scale", "def _preprocess_image(self, sample):\n image = sample[common.IMAGE]\n label = sample[common.LABELS_CLASS]\n\n original_image, image, label = input_preprocess.preprocess_image_and_label(\n image=image,\n label=label,\n crop_height=self.crop_size[0],\n crop_width=self.crop_size[1],\n min_resize_value=self.min_resize_value,\n max_resize_value=self.max_resize_value,\n resize_factor=self.resize_factor,\n min_scale_factor=self.min_scale_factor,\n max_scale_factor=self.max_scale_factor,\n scale_factor_step_size=self.scale_factor_step_size,\n ignore_label=self.ignore_label,\n is_training=self.is_training,\n model_variant=self.model_variant)\n\n sample[common.IMAGE] = image\n\n if not self.is_training:\n # Original image is only used during visualization.\n sample[common.ORIGINAL_IMAGE] = original_image\n\n if label is not None:\n sample[common.LABEL] = label\n\n # Remove common.LABEL_CLASS key in the sample since it is only used to\n # derive label and not used in training and evaluation.\n sample.pop(common.LABELS_CLASS, None)\n\n return sample", "def preprocess_train(im, boxes, classes, inst_masks, mask, input_size, min_size=2,\n use_augment=False, training_scale=[0.3, 0.5, 0.7, 1.0]):\n ori_im = np.copy(im)\n target_h, target_w = input_size\n\n # ---------- old data_augmentation ----------\n if use_augment:\n if np.random.choice([0, 1]) != 0:\n scale = np.random.choice(training_scale) # adding more small objects\n im, inst_masks, mask, boxes, classes = random_scale(im, inst_masks, mask, boxes, classes, scale=scale)\n min_obj_cover = np.random.choice([0.8, 0.9, 1.0])\n # truncted examples may lead to multiple-detections..\n im, inst_masks, mask, boxes, classes = random_aspect_ratio(im, inst_masks, mask, boxes, classes,\n min_aspect_ratio=0.5, max_aspect_ratio=2.0,\n min_obj_cover=min_obj_cover)\n #\n # # r = np.random.randint(0, 3)\n # if 
np.random.rand() < 0.75:\n # im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n # else:\n # im, inst_masks, mask, boxes, classes = center_crop2fixed_pad(im, inst_masks, mask, boxes, classes, target_w, target_h,\n # min_size=min_size)\n\n # ---------- old data_augmentation ----------\n\n # ---------- none data_augmentation ----------\n im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n im, inst_masks, mask, boxes, classes = random_flip(im, inst_masks, mask, boxes, classes)\n # ---------- none data_augmentation ----------\n\n # ---------- old data_augmentation ----------\n im = distort_color(im)\n # ---------- old data_augmentation ----------\n\n im = imcv2_recolor(im)\n\n # add this because zeros numpy array will cause errors in torch Dataloader\n inst_masks = np.zeros([1, target_h, target_w], dtype=inst_masks.dtype) if inst_masks.size == 0 else inst_masks\n\n boxes = np.asarray(boxes, dtype=np.float32)\n return im, boxes, classes, inst_masks, mask, ori_im", "def image_preprocessing(image_buffer, bbox, image_size, is_training):\n if is_training:\n image = _decode_and_random_crop(image_buffer, bbox, image_size)\n image = _normalize(image)\n image = tf.image.random_flip_left_right(image)\n else:\n image = _decode_and_center_crop(image_buffer, image_size)\n image = _normalize(image)\n image = tf.reshape(image, [image_size, image_size, 3])\n return image", "def preprocess_image(img):\n return (img.astype(np.float32)/255.0 - FACENET_MEAN) / FACENET_STD", "def preprocess_one_image(self):\n im = self.image\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n #plt.imshow(im)\n im_pp = image.img_to_array(im)\n im_pp = preprocess_input(im_pp)\n im_pp = im_pp.reshape(1, 224, 224, 3)\n return im_pp", "def preprocess(path, scale=3):\n image = imread(path, is_grayscale=True)\n label_ = modcrop(image, scale)\n\n # Must be normalized\n \n label_ = label_ / 255.\n \n\n\n input_ = scipy.ndimage.interpolation.zoom(label_, (1. / scale), prefilter=False)\n input_ = scipy.ndimage.interpolation.zoom(input_, (scale / 1.), prefilter=False)\n\n return input_, label_", "def preprocess(data):\n # expect image to be passed in as BGR\n rgb_data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)\n batched_rgb_data = np.expand_dims(rgb_data, axis = 0).astype('float64')\n new_data = resnet50.preprocess_input(batched_rgb_data)[0]\n\n return new_data", "def preprocess_image_train(csv_line):\n k = np.random.randint(3) # one each for center, left, right\n\n if (k == 0):\n file = csv_line[\"left\"][0].strip()\n view_shift = CAMERA_SHIFT\n elif(k == 1):\n file = csv_line[\"center\"][0].strip()\n view_shift = 0\n elif(k == 2):\n file = csv_line[\"right\"][0].strip()\n view_shift = -CAMERA_SHIFT\n\n steering_angle = csv_line[\"steering_angle\"][0] + view_shift\n\n img = cv2.imread(file)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img, steering_angle, translate_x = height_width_shift(img, steering_angle, 100)\n img = brightness_shift(img)\n img = preprocessImage(img)\n img = np.array(img)\n\n # randomly flip the image\n if np.random.random() < 0.5:\n img, steering_angle = horizontal_flip(img, steering_angle)\n\n return img, steering_angle", "def aortic_data_augmenter(image, label, shift, rotate, scale, intensity, flip):\n image2 = np.zeros(image.shape, dtype=np.float32)\n label2 = np.zeros(label.shape, dtype=np.int32)\n\n # For N image. 
which come come from the same subject in the LSTM model,\n # generate the same random affine transformation parameters.\n shift_val = [np.clip(np.random.normal(), -3, 3) * shift,\n np.clip(np.random.normal(), -3, 3) * shift]\n rotate_val = np.clip(np.random.normal(), -3, 3) * rotate\n scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale\n intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity\n\n # The affine transformation (rotation + scale + shift)\n row, col = image.shape[1:3]\n M = cv2.getRotationMatrix2D(\n (row / 2, col / 2), rotate_val, 1.0 / scale_val)\n M[:, 2] += shift_val\n\n # Apply the transformation to the image\n for i in range(image.shape[0]):\n for c in range(image.shape[3]):\n image2[i, :, :, c] = ndimage.interpolation.affine_transform(\n image[i, :, :, c], M[:, :2], M[:, 2], order=1)\n\n label2[i, :, :] = ndimage.interpolation.affine_transform(\n label[i, :, :], M[:, :2], M[:, 2], order=0)\n\n # Apply intensity variation\n image2[i] *= intensity_val\n\n # Apply random horizontal or vertical flipping\n if flip:\n if np.random.uniform() >= 0.5:\n image2[i] = image2[i, ::-1, :, :]\n label2[i] = label2[i, ::-1, :]\n else:\n image2[i] = image2[i, :, ::-1, :]\n label2[i] = label2[i, :, ::-1]\n return image2, label2", "def process_image(self, image):\r\n img = cv2.imread(image)\r\n img = img.astype(float)/127 - 1\r\n return np.expand_dims(img, axis=0)", "def preprocess_input(img):\n img /= 255.\n img -= 0.5\n img *= 2.\n return img", "def preprocess_image(filename):\n\n image_string = tf.io.read_file(filename)\n image = tf.image.decode_jpeg(image_string, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.resize(image, target_shape)\n return image", "def process_path(file_path: str):\r\n img = tf.io.read_file(file_path)\r\n img = tf.image.decode_jpeg(img, channels=3)\r\n img = tf.image.resize(img, [IMG_SIZE, IMG_SIZE])\r\n return tf.keras.applications.efficientnet.preprocess_input(img) # Shape: IMG_SIZE x IMG_SIZE x 3\r", "def load_and_preprocess_image(path):\n\n img = cv2.imread(path, 0) # Load image into greyscale\n img = cv2.equalizeHist(img) # Histogram equilization\n return img", "def preprocess(self, im, mean=None):\r\n # preprocessing for all pretrained pytorch models\r\n if mean:\r\n im = resize(im, (227, 227)) - mean\r\n else:\r\n mean = np.array([104, 117, 123])\r\n im = resize(im, (227, 227)) - mean\r\n im = image_io.image_to_tensor(im)\r\n return im", "def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2", "def load_and_prep_image(filename, img_shape=224):\n # Read in the image\n img = tf.io.read_file(filename)\n # Decode the read file into a tensor\n img = tf.image.decode_image(img)\n #Resize the image\n img = tf.image.resize(img,size = [img_shape, img_shape])\n # Rescale the image (get all the values btw 0 and 1)\n img = img/255.\n return img", "def load_image_gt(dataset, config, image_id, augmentation=None):\n # Load image and mask\n image = dataset.load_image(image_id)\n global_mask, mask, class_ids, class_ids2, text_embeds, embed_lengths = 
dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n # TODO\n # global_mask = utils.resize_mask(global_mask, scale, padding, crop)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation and dataset.image_info[image_id]['source'] not in config.NO_AUGMENT_SOURCES:\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # global_mask = det.augment_image(global_mask)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n if config.SOFT_MASK:\n mask *= 255\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n global_mask = det.augment_image(global_mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n \n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n if not config.SOFT_MASK:\n mask = mask.astype(np.bool)\n global_mask = global_mask.astype(np.bool)\n else:\n mask = np.array(mask/255., np.float32)\n\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n \n class_ids = class_ids[_idx]\n class_ids2 = class_ids2[_idx]\n # NOTE NOTE NOTE if label2 is derection, augmentation mast be care hare\n # ------------------------------------------------------------\n def rot90_augment(image, mask, global_mask, class_ids2):\n k = random.choice([0, 1, 2, 3])\n if k:\n image = np.rot90(image, k)\n mask = np.rot90(mask, k)\n global_mask = np.rot90(global_mask, k)\n map_dict = {1: dict(zip([0,1,2,3], [1,2,3,0])),\n 2: dict(zip([0,1,2,3], [2,3,0,1])),\n 3: dict(zip([0,1,2,3], [3,0,1,2]))}\n class_ids2 = np.array([map_dict[k][i] for i in class_ids2])\n return image, mask, global_mask, class_ids2\n image, mask, global_mask, class_ids2 = rot90_augment(image, mask, global_mask, class_ids2)\n text_embeds = text_embeds[_idx]\n embed_lengths = embed_lengths[_idx]\n # Bounding boxes. 
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox, mask_score = utils.extract_bboxes(mask)\n rbbox = utils.extract_minienclose_bboxes(mask)\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n # print (\"dataset.source_class_ids\", dataset.source_class_ids)\n # dataset.source_class_ids {'': [0], 'coco_label2': [0, 8, 9, 10, 11], 'coco': [0, 1, 2, 3, 4, 5, 6, 7]}\n source_class_ids2 = dataset.source_class_ids['coco_label2']\n active_class_ids[source_class_ids2[1: ]] = 1\n active_class_ids2 = active_class_ids[config.NUM_CLASSES: ]\n active_class_ids = active_class_ids[: config.NUM_CLASSES]\n \n # Resize masks to smaller size to reduce memory usage\n if config.USE_MINI_MASK:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE, softmask=config.SOFT_MASK)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids, active_class_ids2)\n\n\n return image, image_meta, class_ids, class_ids2, bbox, rbbox, global_mask, mask, mask_score, text_embeds, embed_lengths", "def augment(self, image):\n pass", "def image_preprocess(image: str):\n if type(image) == str:\n image = cv2.imread(image)\n \n image_cropped = image[25:375, :]\n \n image = cv2.resize(image_cropped, IMG_SIZE, interpolation = cv2.INTER_AREA)\n \n return image", "def parse_img(image_path):\n image = tf.read_file(image_path)\n image = tf.image.decode_image(image)\n image = tf.reshape(image, [INITIAL_RES, INITIAL_RES, 3])\n image = tf.image.resize_images(image, [OUTPUT_RES, OUTPUT_RES])\n #image = image[:, :, ::-1] # BGE -> RGB conversion if needed?\n #image = tf.image.rgb_to_grayscale(image)\n #image = tf.image.convert_image_dtype(image, tf.float32) # In neuralNet.py\n image = image.eval() # Convert from tensor to Numpy array for Keras\n return image", "def _read_image(path):\n data = tf.read_file(path)\n image = tf.image.decode_image(data, channels=3)\n image.set_shape((None, None, 3))\n float_shape = tf.cast(tf.shape(image), tf.float32)\n rows, cols = float_shape[0], float_shape[1]\n max_size = float(IMAGE_SIZE + IMAGE_AUGMENTATION_BORDER)\n new_shape = tf.cond(rows < cols,\n true_fn=lambda: (max_size, cols/rows * max_size),\n false_fn=lambda: (rows/cols * max_size, max_size))\n new_shape = tf.cast(tf.ceil(tf.stack(new_shape)), tf.int32)\n image = tf.image.resize_images(image, new_shape)\n image = tf.random_crop(image, [IMAGE_SIZE, IMAGE_SIZE, 3])\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_hue(image, 0.1)\n image = tf.image.random_brightness(image, 0.1)\n return tf.cast(image, tf.float32) / 0xff", "def preprocess_image(image_bytes,\n is_training=False,\n augmentation=None,\n use_bfloat16=False,\n saturate_uint8=False,\n scale_and_center=False,\n use_default_augment=False):\n if is_training:\n image = _decode_and_random_crop(image_bytes)\n else:\n image = _decode_and_center_crop(image_bytes)\n image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 3])\n # decode and crop returns float32 image with values in range [0, 255]\n if saturate_uint8:\n image = tf.saturate_cast(image, tf.uint8)\n # do augmentations if necessary\n if use_default_augment and is_training:\n image = 
tf.image.random_flip_left_right(image)\n if augmentation is not None:\n tensors_dict = augmentation(image)\n else:\n tensors_dict = {'image': image}\n # cast and rescale all image tensors\n dtype = tf.bfloat16 if use_bfloat16 else tf.float32\n for k, v in tensors_dict.items():\n if k.endswith('image'):\n v = tf.cast(v, dtype)\n if scale_and_center:\n v = v / tf.constant(127.5, dtype) - tf.constant(1.0, dtype)\n tensors_dict[k] = v\n return tensors_dict", "def data_augmentation(image_data, mask_data, rotate=False, vertical_flip=False, horizontal_flip=False):\n aug_images = []\n aug_masks = []\n\n for _ in range(len(image_data)):\n if rotate:\n rotation = A.RandomRotate90(p=1)\n rotated_data = rotation(image=image_data[_], mask=mask_data[_])\n rotated_image = rotated_data['image']\n rotated_mask = rotated_data['mask']\n aug_images.append(rotated_image)\n aug_masks.append(rotated_mask)\n\n if vertical_flip:\n flip_v = A.VerticalFlip(p=1)\n vertical_data = flip_v(image=image_data[_], mask=mask_data[_])\n vertical_image = vertical_data['image']\n vertical_mask = vertical_data['mask']\n aug_images.append(vertical_image)\n aug_masks.append(vertical_mask)\n\n if horizontal_flip:\n flip_h = A.HorizontalFlip(p=1)\n horizontal_data = flip_h(image=image_data[_], mask=mask_data[_])\n horizontal_image = horizontal_data['image']\n horizontal_mask = horizontal_data['mask']\n aug_images.append(horizontal_image)\n aug_masks.append(horizontal_mask)\n\n nd_images = make_ndarray(aug_images)\n nd_masks = make_ndarray(aug_masks)\n #nd_images = np.zeros((len(aug_images), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)\n #nd_masks = np.zeros((len(aug_masks), IMG_HEIGHT, IMG_WIDTH), dtype=np.float32)\n\n #for _ in range(len(aug_images)): # Load into ndarray\n # nd_images[_] = aug_images[_]\n # nd_masks[_] = aug_masks[_] # load mask without channel variable\n\n return nd_images, nd_masks", "def read_img(self, path):\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n resized = cv2.resize(img, (28, 28))\n inverted = np.invert(np.array(resized).reshape((28, 28, 1)))\n denoised_and_reshaped = de_noise(inverted).reshape((28, 28, 1))\n normalized = denoised_and_reshaped / 255.\n return normalized", "def image_augmentation(dataset_dict):\n dataset_dict = copy.deepcopy(dataset_dict)\n image = utils.read_image(dataset_dict[\"file_name\"], format=\"BGR\")\n\n transform_list = [\n T.RandomCrop(crop_type=\"relative_range\", crop_size=[0.95, 0.87]),\n T.RandomBrightness(0.9, 1.5),\n T.RandomContrast(0.8, 1.6),\n T.RandomSaturation(1.0, 1.6),\n T.RandomRotation(angle=[15, 0, 5, 6, 15], expand=False),\n T.RandomFlip(prob=0.5, horizontal=True, vertical=False),\n T.ResizeScale(1.0, 2.0, target_height=900, target_width=700)\n ]\n\n image, transforms = T.apply_transform_gens(transform_list, image)\n dataset_dict[\"image\"] = torch.as_tensor(image.transpose(2, 0, 1).astype(\"float32\"))\n\n annotations = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n instances = utils.annotations_to_instances(annotations, image.shape[:2])\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n return dataset_dict", "def train_image_parse_function(filename, *argv):\n image = read_image(filename)\n image = tf.image.random_flip_left_right(image)\n\n if FLAGS.augmentation:\n print('data augmentation')\n resized_image = resize_and_random_crop_image(image)\n else:\n resized_image = resize_image(image)\n resized_image 
= scale_image_value(resized_image)\n\n if len(argv) == 1:\n return resized_image, argv[0]\n elif len(argv) == 2:\n return resized_image, argv[0], argv[1]\n else:\n return resized_image", "def preprocess_img(img):\n # Make a copy of img as array\n img = np.array(img)\n\n # Convert into tensor\n img = torch.Tensor(img).permute(2, 0, 1) / 255.0\n\n # Normalize\n for t, m, s in zip(img, TORCH_IMG_MEAN, TORCH_IMG_STD):\n t.sub_(m).div_(s)\n\n return img", "def load_image(image_path, label, augment=False, crop_10=False):\n image = cv2.imread(image_path.numpy().decode()).astype(np.float32)\n\n if augment:\n image = random_aspect(image)\n image = random_size(image)\n image = random_crop(image)\n image = random_flip(image)\n image = random_hsv(image)\n image = random_pca(image)\n else:\n image = random_size(image, target_size=256)\n if crop_10:\n image = test_10_crop(image)\n else:\n image = center_crop(image)\n\n image = normalize(image)\n\n label_one_hot = np.zeros(c.category_num)\n label_one_hot[label] = 1.0\n\n return image, label_one_hot", "def read_and_decode(filename, is_train=None):\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example, features={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw': tf.FixedLenFeature([], tf.string),\n }\n )\n # You can do more image distortion here for training data\n img = tf.decode_raw(features['img_raw'], tf.float32)\n img = tf.reshape(img, [32, 32, 3])\n # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5\n if is_train ==True:\n # 1. Randomly crop a [height, width] section of the image.\n img = tf.random_crop(img, [24, 24, 3])\n\n # 2. Randomly flip the image horizontally.\n img = tf.image.random_flip_left_right(img)\n\n # 3. Randomly change brightness.\n img = tf.image.random_brightness(img, max_delta=63)\n\n # 4. Randomly change contrast.\n img = tf.image.random_contrast(img, lower=0.2, upper=1.8)\n\n # 5. Subtract off the mean and divide by the variance of the pixels.\n img = tf.image.per_image_standardization(img)\n\n elif is_train == False:\n # 1. Crop the central [height, width] of the image.\n img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)\n\n # 2. 
Subtract off the mean and divide by the variance of the pixels.\n img = tf.image.per_image_standardization(img)\n\n elif is_train == None:\n img = img\n\n label = tf.cast(features['label'], tf.int32)\n return img, label", "def read_for_training(p):\n return read_cropped_image_npy(p, True)", "def load_image_gt(dataset, config, image_id, augmentation=None):\n # Load image and mask\n image = dataset.load_image(image_id)\n global_mask, mask, class_ids, class_ids2, text_embeds, embed_lengths = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n # TODO\n # global_mask = utils.resize_mask(global_mask, scale, padding, crop)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # global_mask = det.augment_image(global_mask)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n if config.SOFT_MASK:\n mask *= 255\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n global_mask = det.augment_image(global_mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n \n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n if not config.SOFT_MASK:\n mask = mask.astype(np.bool)\n global_mask = global_mask.astype(np.bool)\n else:\n mask = np.array(mask/255., np.float32)\n\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n \n class_ids = class_ids[_idx]\n class_ids2 = class_ids2[_idx]\n # NOTE NOTE NOTE if label2 is derection, augmentation mast be care hare\n # ------------------------------------------------------------\n def rot90_augment(image, mask, global_mask, class_ids2):\n k = np.random.choice([0, 1, 2, 3])\n if k:\n image = np.rot90(image, k)\n mask = np.rot90(mask, k)\n global_mask = np.rot90(global_mask, k)\n map_dict = {1: dict(zip([0,1,2,3], [1,2,3,0])),\n 2: dict(zip([0,1,2,3], [2,3,0,1])),\n 3: dict(zip([0,1,2,3], [3,0,1,2]))}\n class_ids2 = np.array([map_dict[k][i] for i in class_ids2])\n return image, mask, global_mask, class_ids2\n image, mask, global_mask, class_ids2 = rot90_augment(image, mask, global_mask, class_ids2)\n text_embeds = text_embeds[_idx]\n embed_lengths = embed_lengths[_idx]\n # Bounding boxes. 
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox, mask_score = utils.extract_bboxes(mask)\n rbbox = utils.extract_minienclose_bboxes(mask)\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n # print (\"dataset.source_class_ids\", dataset.source_class_ids)\n # dataset.source_class_ids {'': [0], 'coco_label2': [0, 8, 9, 10, 11], 'coco': [0, 1, 2, 3, 4, 5, 6, 7]}\n source_class_ids2 = dataset.source_class_ids['coco_label2']\n active_class_ids[source_class_ids2[1: ]] = 1\n active_class_ids2 = active_class_ids[config.NUM_CLASSES: ]\n active_class_ids = active_class_ids[: config.NUM_CLASSES]\n \n # Resize masks to smaller size to reduce memory usage\n if config.USE_MINI_MASK:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE, softmask=config.SOFT_MASK)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids, active_class_ids2)\n\n\n return image, image_meta, class_ids, class_ids2, bbox, rbbox, global_mask, mask, mask_score, text_embeds, embed_lengths", "def feed_input(self, image):\n image_preprocessed = self._preprocess_input(image, width=self.input_shape[3], height=self.input_shape[2])\n super().feed_input(image_preprocessed)", "def cmp_data_aug_image(train_dataset, train_dir):\n target_class = random.choice(train_dataset.class_names)\n target_dir = train_dir + '/' + target_class\n random_image = random.choice(os.listdir(target_dir))\n random_image_path = target_dir + '/' + random_image\n print(random_image_path)\n\n # Read and plot in the random image\n img = mpimg.imread(random_image_path)\n plt.imshow(img)\n plt.title(f\"Original Image from class: {target_class}\")\n plt.axis(False)\n\n # Now let's plot our augmented random image\n augmented_img = data_augmentation(tf.expand_dims(img, axis=0))\n plt.figure()\n plt.imshow(tf.squeeze(augmented_img/255.)) #Invalid shape (1, 553, 440, 3) for image data - squeezed after getting this error\n plt.title(f\"Augmented Image from class: {target_class}\")\n plt.axis(False)", "def test_data_augmentation_transforms():\n\n transforms_list = get_data_augmentation_transforms(inp_size=(100, 50), pixel_mean=[0.5], pixel_std=[0.3]).transforms\n\n assert len(transforms_list) > 3\n\n # last 3 should be fundamental\n augmentation_transforms = Compose(transforms_list[:-3])\n\n try:\n inp_img = Image.fromarray(np.loadtxt(\"proj6_code/proj6_unit_tests/test_data/transform_inp.txt\", dtype=\"uint8\"))\n\n except:\n inp_img = Image.fromarray(\n np.loadtxt(\"../proj6_code/proj6_unit_tests/test_data/transform_inp.txt\", dtype=\"uint8\")\n )\n augmented_img = augmentation_transforms(inp_img)\n assert isinstance(augmented_img, type(inp_img))\n assert not np.array_equal(augmented_img, inp_img)", "def _preprocess_image(self, image):\n img = tf.convert_to_tensor(image)\n # img = tf.io.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.uint8)\n\n resized_img = tf.image.resize(img, (self._input_height, self._input_width))\n resized_img = resized_img[tf.newaxis, :]\n return resized_img", "def _preprocess_fn(data):\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise 
ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data", "def augment_img(img):\n img = random_hflip_img(img)\n img = cutout_img(img, size=12)\n img = zero_pad_and_crop_img(img)\n return img", "def preprocess_data(X):\n # NOTE: # If you have conducted any pre-processing on the image,\n # please implement this function to apply onto test images.\n return X", "def loader(path):\n img = np.load(path)\n img = img[1:4]\n if np.random.choice((True, False)):\n img = img[:, :, ::-1]\n img = np.array(img)\n if np.random.choice((True, False)):\n img = img[:, ::-1, :]\n img = np.array(img)\n\n img = img.transpose((1, 2, 0)) # pytorch is going to rotate it back\n return img", "def preprocess(img):\n\t#if \"thresh\" in args[\"preprocess\"]:\n\timage = cv2.threshold(img, 0, 255,\n\t\tcv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\t\"\"\"\n\tmake a check to see if median blurring should be done to remove\n\tnoise\n\t\"\"\"\n\t#if \"blur\" in args[\"preprocess\"]:\n\t\t#image = cv2.medianBlur(gray, 3)\n\treturn image", "def preprocess_fn(img_id, image, box, is_training):\n cv2.setNumThreads(2)\n\n def _infer_data(image, input_shape):\n img_h, img_w, _ = image.shape\n input_h, input_w = input_shape\n\n image = cv2.resize(image, (input_w, input_h))\n\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n return img_id, image, np.array((img_h, img_w), np.float32)\n\n def _data_aug(image, box, is_training, image_size=(600, 600)):\n \"\"\"Data augmentation function.\"\"\"\n ih, iw, _ = image.shape\n w, h = image_size\n\n if not is_training:\n return _infer_data(image, image_size)\n\n # Random crop\n box = box.astype(np.float32)\n image, box = random_sample_crop(image, box)\n ih, iw, _ = image.shape\n\n # Resize image\n image = cv2.resize(image, (w, h))\n\n # Flip image or not\n flip = _rand() < .5\n if flip:\n image = cv2.flip(image, 1, dst=None)\n\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n box[:, [0, 2]] = box[:, [0, 2]] / ih\n box[:, [1, 3]] = box[:, [1, 3]] / iw\n\n if flip:\n box[:, [1, 3]] = 1 - box[:, [3, 1]]\n\n box, label, num_match = retinanet_bboxes_encode(box)\n return image, box, label, num_match\n\n return _data_aug(image, box, is_training, image_size=config.img_shape)", "def preprocess_image(image, augment = False, central_crop_size = None, num_towers = 4):\n with tf.variable_scope('PreprocessImage'):\n image = tf.image.convert_image_dtype(image, dtype = tf.float32)\n if augment or central_crop_size:\n if num_towers == 1:\n images = [image]\n else:\n images = tf.split(value = image, num_or_size_splits = num_towers, axis = 1)\n if central_crop_size:\n view_crop_size = (central_crop_size[0] / num_towers, central_crop_size[1])\n images = [central_crop(img, view_crop_size) for img in images]\n if augment:\n images = [augment_image(img) for img in images]\n image = tf.concat(images, 1)\n\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.5)\n\n return tf.split(value = image, num_or_size_splits = num_towers, axis = 1)[0]", "def imagenet_preprocess(image, label):\n i = image\n i = tf.cast(i, 
tf.float32)\n i = tf.image.resize_with_crop_or_pad(i, 224, 224)\n if model_name == 'ResNet50' or model_name == 'ResNet152':\n i = tf.keras.applications.resnet.preprocess_input(i)\n else:\n i = tf.keras.applications.densenet.preprocess_input(i)\n return (i, label)", "def preprocess_image(image_buffer, train=True):\n\n image = decode_jpeg(image_buffer)\n height = FLAGS.image_size\n width = FLAGS.image_size\n\n # Resize the image to the original height and width.\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [height, width],\n align_corners=False)\n image = tf.squeeze(image, [0])\n\n # Finally, rescale to [-1,1] instead of [0, 1)\n image = tf.sub(image, 0.5)\n image = tf.mul(image, 2.0)\n return image", "def __call__(self, in_data):\n # There are five data augmentation steps\n # 1. Color augmentation\n # 2. Random expansion\n # 3. Random cropping\n # 4. Resizing with random interpolation\n # 5. Random horizontal flipping\n if self.count % 10 == 0 and self.count % self.batchsize == 0 and self.count != 0:\n self.i += 1\n i = self.i % len(self.dim)\n self.output_shape = (self.dim[i], self.dim[i])\n # print(self.count, self.i, self.output_shape)\n self.count += 1\n\n img, bbox, label = in_data\n\n # 1. Color augmentation\n img = random_distort(img, brightness_delta=32,\n contrast_low=0.5, contrast_high=1.5,\n saturation_low=0.5, saturation_high=1.5,\n hue_delta=25)\n\n # Normalize. range is [0, 1]\n img /= 255.0\n\n _, H, W = img.shape\n scale = np.random.uniform(0.25, 2)\n random_expand = np.random.uniform(0.8, 1.2, 2)\n net_h, net_w = self.output_shape\n out_h = net_h * scale # random_expand[0]\n out_w = net_w * scale # random_expand[1]\n if H > W:\n out_w = out_h * (float(W) / H) * np.random.uniform(0.8, 1.2)\n elif H < W:\n out_h = out_w * (float(H) / W) * np.random.uniform(0.8, 1.2)\n\n out_h = int(out_h)\n out_w = int(out_w)\n\n img = resize_with_random_interpolation(img, (out_h, out_w))\n bbox = transforms.resize_bbox(bbox, (H, W), (out_h, out_w))\n\n if out_h < net_h and out_w < net_w:\n img, param = expand(img, out_h=net_h, out_w=net_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n else:\n out_h = net_h if net_h > out_h else int(out_h * 1.05)\n out_w = net_w if net_w > out_w else int(out_w * 1.05)\n img, param = expand(img, out_h=out_h, out_w=out_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n\n img, param = crop_with_bbox_constraints(\n img, bbox, return_param=True,\n crop_height=net_h, crop_width=net_w)\n bbox, param = transforms.crop_bbox(\n bbox, y_slice=param['y_slice'], x_slice=param['x_slice'],\n allow_outside_center=False, return_param=True)\n label = label[param['index']]\n\n\n # 5. 
Random horizontal flipping # OK\n img, params = transforms.random_flip(\n img, x_random=True, return_param=True)\n bbox = transforms.flip_bbox(\n bbox, self.output_shape, x_flip=params['x_flip'])\n\n # Preparation for Yolov2 network\n bbox[:, ::2] /= self.output_shape[0] # y\n bbox[:, 1::2] /= self.output_shape[1] # x\n\n num_bbox = len(bbox)\n len_max = max(num_bbox, self.max_target)\n\n gmap = create_map_anchor_gt(bbox, self.anchors, self.output_shape,\n self.downscale, self.n_boxes, len_max)\n\n out_bbox = np.zeros((len_max, 4), dtype='f')\n out_bbox[:num_bbox] = bbox[:num_bbox]\n out_label = np.zeros((len_max), dtype='i')\n out_label[:num_bbox] = label\n\n gmap = gmap[:self.max_target]\n out_bbox = out_bbox[:self.max_target]\n out_label = out_label[:self.max_target]\n num_array = min(num_bbox, self.max_target)\n\n img = np.clip(img, 0, 1)\n return img, out_bbox, out_label, gmap, np.array([num_array], dtype='i')", "def preprocessImage(img):\n shape = img.shape\n img = img[math.floor(shape[0]/4) : shape[0] - 25, 0:shape[1]]\n img = cv2.resize(img, (img_columns, img_rows), interpolation = cv2.INTER_AREA)\n return img" ]
[ "0.6944267", "0.6885695", "0.6801366", "0.6746409", "0.6730426", "0.6700084", "0.6674317", "0.6642256", "0.6604827", "0.65799797", "0.65796673", "0.6575271", "0.6572347", "0.6542871", "0.6535678", "0.6521868", "0.64719284", "0.64391285", "0.6430673", "0.6425564", "0.6418553", "0.63944936", "0.6359762", "0.63452995", "0.6345004", "0.63389987", "0.6315763", "0.6315252", "0.6310417", "0.6308325", "0.6284506", "0.62664473", "0.6263605", "0.6257267", "0.6234239", "0.6203765", "0.62015784", "0.6180643", "0.6180255", "0.61700445", "0.61690724", "0.6164018", "0.6163977", "0.61560243", "0.6155601", "0.6150372", "0.61460894", "0.6142429", "0.6140535", "0.6138248", "0.6136851", "0.613612", "0.6132722", "0.6127849", "0.6120276", "0.61175954", "0.610489", "0.610019", "0.60997254", "0.60947174", "0.6091267", "0.608965", "0.6088806", "0.6086736", "0.6086612", "0.60831153", "0.60787815", "0.6074412", "0.6049668", "0.6047777", "0.60415375", "0.6036259", "0.6034147", "0.60316366", "0.602994", "0.60196346", "0.601062", "0.60040987", "0.5999003", "0.5994789", "0.59923786", "0.5980464", "0.59800476", "0.59781265", "0.59739983", "0.5973663", "0.59692925", "0.59520364", "0.5951948", "0.59508634", "0.5948588", "0.5946806", "0.59417313", "0.5936421", "0.5930055", "0.592037", "0.5912311", "0.58991885", "0.5898162", "0.58933717" ]
0.70998
0
Helper function to visualize mask on the top of the car
def mask_overlay(image, mask, color=(0, 255, 0)): mask = np.dstack((mask, mask, mask)) * np.array(color) mask = mask.astype(np.uint8) weighted_sum = cv2.addWeighted(mask, 0.5, image, 0.5, 0.) img = image.copy() ind = mask[:, :, 1] > 0 img[ind] = weighted_sum[ind] return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_mask(image, mask): \n plt.subplot(1,2,1)\n plt.title('image')\n plt.imshow(image)\n plt.subplot(1,2,2)\n plt.title('mask')\n plt.imshow(mask)\n plt.show()", "def mask_show(image, mask, groups, name=\"image\"):\n img = cv2.addWeighted(image, 0.4, mask, 0.6, 0)\n img = sg.mark_boundaries(img, groups, color=(1,1,1))\n cv2.imshow(name, img)\n cv2.waitKey(0)", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def test(shape=(1000,2000)):\n mask = Mask()\n mask.addCircle(400,300,250)\n mask.subtractCircle(400,300,150)\n mask.addRectangle(350,250,1500,700)\n plt.imshow( mask.getMask(shape) )\n return mask", "def show_holes_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n radius=9\n out_image = img.copy()\n out_image = cv2.cvtColor(out_image, cv2.COLOR_GRAY2RGB)\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n y_center = (dy.start + dy.stop - 1)/2 \n center=(x_center,y_center)\n cv2.circle(out_image, center, radius,(111,17,108),thickness=2)\n\n plt.figure()\n plt.imshow(out_image)\n plt.autoscale(False)\n return out_image", "def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")", "def show_image(self, mask=np.ones((32, 32), dtype=bool)):\n image = np.copy(self.__image)\n image[~mask] = 0\n plt.imshow(image, aspect=\"auto\")\n plt.show()", "def vision(image):\n vis_map = resize(image, alpha, beta)\n print(\"Resized map from the blue mask\")\n\n world = rotate(vis_map)\n\n plt.figure()\n plt.imshow(world[:, :, ::-1])\n plt.show()\n object_grid, occupancy_grid = detect_object(world)\n print(\"Result of the red mask\")\n plt.figure()\n plt.imshow(occupancy_grid)\n plt.show()\n return object_grid, occupancy_grid, world", "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def imshow_overlay(im, mask, alpha=0.5, color='red', **kwargs):\n mask = mask > 0\n mask = ma.masked_where(~mask, mask) \n plt.imshow(im, **kwargs)\n plt.imshow(mask, alpha=alpha, cmap=ListedColormap([color]))", "def show_mask(self):\n print(\" h k mask\")\n for a, b, c in zip(self.h, self.k, self.mask):\n print(\"{0: 1d} {1: 1d} {2:s}\".format(a, b, c))", "def make_lungmask(img, display=False):\n row_size= img.shape[0]\n col_size = img.shape[1]\n \n mean = np.mean(img)\n std = np.std(img)\n img = img-mean\n img = img/std\n\n # uses hounsfield values near lungs to normalize images\n\n middle = img[int(col_size/5):int(col_size/5*4),int(row_size/5):int(row_size/5*4)] \n mean = np.mean(middle) \n max = np.max(img)\n min = np.min(img)\n img[img==max]=mean\n img[img==min]=mean\n \n # uses kmeans to separate foreground (soft tissue / bone) and background (lung/air)\n\n kmeans = KMeans(n_clusters=2).fit(np.reshape(middle,[np.prod(middle.shape),1]))\n centers = sorted(kmeans.cluster_centers_.flatten())\n threshold = np.mean(centers)\n thresh_img = 
np.where(img<threshold,1.0,0.0)\n\n # performs erosion and dilation\n\n eroded = morphology.erosion(thresh_img,np.ones([3,3]))\n dilation = morphology.dilation(eroded,np.ones([8,8]))\n\n labels = measure.label(dilation) # Different labels are displayed in different colors\n label_vals = np.unique(labels)\n regions = measure.regionprops(labels)\n good_labels = []\n for prop in regions:\n B = prop.bbox\n if B[2]-B[0]<row_size/10*9 and B[3]-B[1]<col_size/10*9 and B[0]>row_size/5 and B[2]<col_size/5*4:\n good_labels.append(prop.label)\n mask = np.ndarray([row_size,col_size],dtype=np.int8)\n mask[:] = 0\n\n # makes mask\n\n for N in good_labels:\n mask = mask + np.where(labels==N,1,0)\n mask = morphology.dilation(mask,np.ones([10,10])) # one last dilation\n final = mask * img\n \n # shows and saves output\n\n plt.imshow(final)\n im = Image.fromarray(final*128)\n im = im.convert(\"L\")\n im.save(S)\n \n return", "def vis_mask(img, mask,width,height, col, alpha=0.4, show_border=True, border_thick= -1):\n\n img = img.astype(np.float32)\n idx = np.nonzero(mask)\n #np.PredictionBoxes(col)\n img[idx[0], idx[1], :] *= 1.0 - alpha\n img[idx[0], idx[1], :] += alpha * (400/255.0)\n\n if show_border:\n _, contours, _ = cv2.findContours(\n mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n cv2.drawContours(img, contours, -1,col, border_thick, cv2.LINE_AA)\n #cv2.drawContours(c, contours, -1, 1, border_thick, cv2.LINE_AA)\n\n return img.astype(np.uint8)", "def Inpainting_mosaic(im, mask):\n im0 = np.copy(im) # Lager kopi av bilde\n im[im < 0] = 0 # klipp til lovlige verdier\n im[im > 1] = 1\n for i in range(25): # Itererer \n im = eksplisitt(im, n=1) # Løser ved hjelp av eksplisittfunksjon\n im[np.logical_not(mask)] = im0[np.logical_not(mask)] \n return im", "def overlay(image, mask):\n if len(image.shape) == 3:\n image = image[:, :, 0]\n if len(mask.shape) == 3:\n mask = mask[:, :, 0]\n if np.amax(image) > 100:\n image = image / 255\n\n masked = np.ma.masked_where(mask == 0, mask)\n\n plt.figure()\n plt.subplot(1, 2, 1)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.subplot(1, 2, 2)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.imshow(masked, 'jet', interpolation='nearest', alpha=0.5)\n plt.show()", "def showComponents(self, mask):\n\n from skimage import measure\n\n thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]\n labels = measure.label(thresh, neighbors=8, background=0)\n for label in range(0,len(labels)):\n img = np.zeros(mask.shape)\n # if this is the background label, ignore it\n if label == 0:\n continue\n img[labels==label]=255\n numPixels = cv2.countNonZero(img)\n\n \t# if the number of pixels in the component is sufficiently\n \t# large, then add it to our mask of \"large blobs\"\n if numPixels > 500:\n showme(img, 'Contour '+str(label))", "def watershed(mask, img, plotImage = False, kernelSize = None):\n imgCopy = img.copy()\n maskCopy = np.array(mask.copy(), dtype=np.uint8)\n \n if kernelSize is None:\n kernelSize = 2\n\n # Finding sure foreground area\n #dist_transform = cv2.distanceTransform(mask, cv2.DIST_L2, 5)\n #ret, sure_fg = cv2.threshold(dist_transform,0.3*dist_transform.max(),255,0) #change the second argument to change the sensitivity \n maskClosed = skimage.morphology.closing(np.array(maskCopy, dtype=np.uint8))\n maskClosed = skimage.morphology.closing(np.array(maskClosed, dtype=np.uint8))\n kernel = np.ones((kernelSize,kernelSize), np.uint8)\n # maskCopy = img_as_bool(maskCopy)\n sure_fg = cv2.erode(maskClosed, kernel, iterations = 2) ###\n 
sure_fg = skimage.morphology.closing(np.array(sure_fg, dtype=np.uint8))\n # kernel = np.ones((2,2), np.uint8)\n # sure_fg = binary_closing(sure_fg, kernel)\n \n # sure background area\n #kernel = np.ones((5, 5), np.uint8)\n #sure_bg = cv2.dilate(mask, kernel, iterations = 1)\n sure_fg_bool = 1 - img_as_bool(sure_fg)\n # sure_bg = np.uint8(1 - morphology.medial_axis(sure_fg_bool)) ### \n sure_bg = np.uint8(1 - morphology.skeletonize(sure_fg_bool))\n sure_bg[0, :] = 1\n sure_bg[-1, :] = 1\n sure_bg[:, 0] = 1\n sure_bg[:, -1] = 1\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n \n if plotImage:\n plt.figure()\n plt.imshow(sure_fg)\n plt.title(\"Inner Marker\")\n plt.figure()\n plt.imshow(sure_bg)\n plt.title(\"Outer Marker\")\n plt.figure()\n plt.imshow(unknown)\n plt.title(\"Unknown\")\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n\n # Now, mark the region of unknown with zero\n markers[unknown==1] = 0\n \n if plotImage:\n plt.figure()\n plt.imshow(markers, cmap='jet')\n plt.title(\"Markers\")\n \n # Do watershed\n markers = cv2.watershed(imgCopy, markers)\n \n imgCopy[markers == -1] = [0, 255 ,0]\n\n if plotImage:\n plt.figure()\n plt.imshow(markers,cmap='jet')\n plt.title(\"Mask\")\n plt.figure()\n plt.imshow(img)\n plt.title(\"Original Image\")\n plt.figure()\n plt.imshow(imgCopy)\n plt.title(\"Marked Image\")\n plt.show()\n\n return markers", "def inpaint(self, img_slice, mask_slice, min_x, max_x, min_y, max_y, views='lateral'):\n # create binary mask\n mask = np.zeros(img_slice.shape)\n mask[min_x:max_x, min_y:max_y] = 1\n # keep a copy of original to have background later \n img_orig = np.copy(img_slice)\n mask_binary = np.copy(mask)\n\n # rotate image if coronal\n if views=='coronal':\n img_slice = np.rot90(img_slice, axes=(1, 0)) # image is from lat,ax -> ax,lat\n mask_slice = np.rot90(mask_slice, axes=(1, 0))\n mask = np.rot90(mask, axes=(1, 0))\n \n # prepare binary mask for net\n mask = cv2.resize(mask, self.resize_size, interpolation=cv2.INTER_NEAREST)\n mask = torch.Tensor(mask) # gives dtype float32\n mask = mask.unsqueeze(0)\n mask = mask.unsqueeze(0)\n\n # prepare seg mask for net\n mask_slice[mask_slice==self.vertebra_id] = 0\n # resize to network size\n mask_seg = cv2.resize(mask_slice, self.resize_size, interpolation=cv2.INTER_NEAREST)\n mask_seg = np.uint8(np.round(mask_seg)) # just to be sure\n\n mask_seg = self.map_vert_to_class(mask_seg)\n mask_seg = torch.Tensor(mask_seg) # gives dtype float32\n mask_seg_one_hot = torch.nn.functional.one_hot(mask_seg.long(), num_classes=6)\n mask_seg_one_hot = mask_seg_one_hot.permute(2,0,1)\n mask_seg_one_hot = mask_seg_one_hot.unsqueeze(0)\n mask_seg = mask_seg.unsqueeze(0)\n mask_seg = mask_seg.unsqueeze(0)\n\n # prepare img for net \n img_slice = cv2.resize(img_slice, self.resize_size)\n img_slice = np.clip(img_slice, -1024, 3071) # clip to HU units\n img_slice = np.uint8(255*(img_slice+1024)/4095) # normalize to range 0-255 \n img_slice = img_slice[:,:, None]\n img_slice = self.toTensor(img_slice)\n img_slice = img_slice.unsqueeze(0)\n corrupt_img = (1-mask)*img_slice\n\n if self.use_cuda:\n mask = mask.cuda()\n mask_seg = mask_seg.cuda()\n corrupt_img = corrupt_img.cuda() \n\n # inpaint\n if views=='lateral':\n netG = self.netGlat\n elif views=='coronal':\n netG = self.netGcor\n\n # get prediction\n with torch.no_grad():\n _, inpainted_mask, inpainted_img = 
netG(corrupt_img, mask_seg, mask)\n inpainted_mask = self.softmax(inpainted_mask)\n\n #inpainted_mask = torch.argmax(inpainted_mask, dim=1)\n inpainted_img = inpainted_img * mask + corrupt_img * (1. - mask)\n inpainted_mask = inpainted_mask * mask + mask_seg_one_hot * (1. - mask)\n #inpainted_mask = self.map_class_to_vert(inpainted_mask)\n\n # set img back to how it was\n inpainted_img = inpainted_img.squeeze().detach().cpu().numpy()\n inpainted_img = (inpainted_img)*4095 - 1024 # normalize back to HU units \n inpainted_img = cv2.resize(inpainted_img, (self.orig_ax_length, self.orig_ax_length))\n # set mask back\n inpainted_mask = inpainted_mask.squeeze().detach().cpu().numpy()\n inpainted_mask_resized = np.zeros((6, self.orig_ax_length, self.orig_ax_length))\n for i in range(6):\n if views=='coronal':\n inpainted_mask_resized[i,:,:] = np.rot90(cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length))) #, interpolation=cv2.INTER_NEAREST)\n else:\n inpainted_mask_resized[i,:,:] = cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length)) #, interpolation=cv2.INTER_NEAREST)\n inpainted_mask = inpainted_mask_resized\n \n if views=='coronal':\n inpainted_img = np.rot90(inpainted_img) #, axes=(1, 0))\n\n return inpainted_img, inpainted_mask, mask_binary", "def display(self, raw_img=True):\n cv2.imshow('mask', self.__mask)\n if raw_img:\n cv2.imshow('raw image', self.__img)", "def show_performance(model):\n val_image_ids_ = [i for i in val_image_ids]\n np.random.shuffle(val_image_ids_)\n\n df_val = area_filter(val_image_ids_, val_coco)\n image_id = df_val['image_id'].iloc[0]\n annotation_ids = df_val[df_val['image_id'] == image_id]['annotation_id'].tolist()\n\n image_json = val_coco.loadImgs([image_id])[0]\n raw_image = cv2.imread(os.path.join(\"{}/{}/{}\".format(data_dir, val_type, image_json['file_name'])))\n height, width, _ = raw_image.shape\n\n # decode the mask, using annotation id created at the group by above\n binary_mask = process_mask(val_coco, annotation_ids, width, height)\n\n # preprocess input and mask (resize to 128, scale to [0, 1))\n input_image, input_mask = preprocess(raw_image, binary_mask)\n\n input_mask = np.expand_dims(input_mask, axis=-1)\n predicted_mask = model.predict(np.array([input_image]))[0]\n\n plt.figure(figsize=(20, 20))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n display_list = [input_image[:, :, ::-1], input_mask, predicted_mask]\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(array_to_img(display_list[i]))\n plt.axis('off')\n plt.show()", "def draw_contours(self, image, maskImg):\r\n # Required variables..\r\n x, y, width, height = 0, 0, 0, 0\r\n # Find contours..\r\n contours, hierarchy = cv2.findContours(image=maskImg, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE) # Playable Parameters..\r\n # Draw the contours..\r\n for contour in contours:\r\n # Calculate the area of the contour, so can remove unnecessary contours..\r\n area = cv2.contourArea(contour=contour)\r\n if area > 3000: # Playable adjustment..!! Found Good as 3000 for current light condition.. 
change this if light condition changes..\r\n # Draw the contours to the image -- actual frame..\r\n if self.debug_mode:\r\n cv2.drawContours(image=image, contours=contour, contourIdx=-1, color=(255, 255, 0), thickness=4)\r\n # Find the perimeter of the markers detected...\r\n perimeter = cv2.arcLength(curve=contour, closed=True)\r\n # Approximating/Finding the corners of the image from the obtained corners..\r\n approx_corners = cv2.approxPolyDP(curve=contour, epsilon=0.02 * perimeter, closed=True)\r\n # Find the bounding box rectangle for the approximated corners..\r\n x, y, width, height = cv2.boundingRect(approx_corners)\r\n # Return the values with which a rectangle can be drawn..\r\n return x, y, width, height", "def show(im,fig= None): #X\n im = im.copy()\n if len(im.shape)==1 or im.shape[1]==1:\n im = X2patch(im)\n im[im<=DEAD]=-0.5\n if fig is None:\n plt.figure()\n fig = plt.imshow(hsv_to_rgb(im+0.5))\n fig.set_data(hsv_to_rgb(im+0.5))\n plt.draw()\n plt.pause(0.001)\n return fig", "def mask(self):", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis", "def plot_diagnostics(foldspec, flag, mask):\n \n plt.figure(figsize=(15,10))\n \n plt.subplot(231)\n plt.plot(foldspec.mean(0).mean(0), color='k')\n plt.xlabel('phase (bins)')\n plt.ylabel('I (arb.)')\n plt.title('Pulse Profile')\n plt.xlim(0, foldspec.shape[-1])\n \n plt.subplot(232)\n plt.title('RFI flagging parameter (log10)')\n plt.xlabel('time (bins)')\n plt.ylabel('freq (bins)')\n plt.imshow(np.log10(flag).T, aspect='auto')\n\n plt.subplot(233)\n plt.title('Manual off-gate scaling')\n plt.imshow(mask.T, aspect='auto', cmap='Greys')\n plt.xlabel('time (bins)')\n \n plt.subplot(234)\n plt.imshow(foldspec.mean(0), aspect='auto')\n plt.xlabel('phase')\n plt.ylabel('freq')\n\n plt.subplot(235)\n plt.imshow(foldspec.mean(1), aspect='auto')\n plt.xlabel('phase')\n plt.ylabel('time')\n\n 
plt.subplot(236)\n plt.imshow(foldspec.mean(2).T, aspect='auto')\n plt.xlabel('time')\n plt.ylabel('freq')", "def showProcessing(img, thresh = None, plotImage = False, fillHole = False):\n if thresh is None:\n # Perform Otsu thresholding\n thresh, mask = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n else: \n thresh, mask = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)\n \n # Delete Small Objects \n numPixelsInImage = img.shape[0] * img.shape[1]\n minPixelCount = 2\n mask_smallDeleted = deleteSmallObjects(mask, minPixelCount)\n \n # Fill holes\n if fillHole:\n mask_holesFilled = fillHoles(mask_smallDeleted)\n else:\n mask_holesFilled = mask_smallDeleted \n \n if plotImage:\n plt.imshow(img)\n plt.title(\"Original\")\n plt.show()\n\n plt.imshow(mask)\n plt.title(\"Otsu thresholding\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(mask_smallDeleted)\n plt.title(\"Small objects deleted\")\n plt.show()\n \n if fillHole:\n plt.imshow(mask_holesFilled)\n plt.title(\"Filled holes\")\n plt.show()\n \n return mask_holesFilled", "def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):\n import matplotlib.pyplot as plt\n for i in xrange(rois_blob.shape[0]):\n rois = rois_blob[i, :]\n im_ind = rois[0]\n roi = rois[1:]\n im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()\n im += cfg.PIXEL_MEANS\n im = im[:, :, (2, 1, 0)]\n im = im.astype(np.uint8)\n cls = labels_blob[i]\n plt.imshow(im)\n print 'class: ', cls, ' overlap: ', overlaps[i]\n plt.gca().add_patch(\n plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],\n roi[3] - roi[1], fill=False,\n edgecolor='r', linewidth=3)\n )\n plt.show()", "def get_mask(self, img):\n raise NotImplementedError()", "def ShowLongitBackground(spectra,spectraUp,spectraDown,spectraAv,all_titles,all_filt,object_name,NBIMGPERROW=2,right_edge=1800):\n NBSPEC=len(spectra)\n MAXIMGROW=(NBSPEC-1) / NBIMGPERROW +1\n\n f, axarr = plt.subplots(MAXIMGROW,NBIMGPERROW,figsize=(25,5*MAXIMGROW))\n f.tight_layout()\n for index in np.arange(0,NBSPEC):\n ix=index%NBIMGPERROW\n iy=index/NBIMGPERROW\n axarr[iy,ix].plot(spectra[index],'r-')\n axarr[iy,ix].plot(spectraUp[index],'b-')\n axarr[iy,ix].plot(spectraDown[index],'g-')\n axarr[iy,ix].plot(spectraAv[index],'m-')\n thetitle=\"{}) : {} \".format(index,all_titles[index])\n axarr[iy,ix].set_title(thetitle)\n axarr[iy,ix].grid(True)\n axarr[iy,ix].set_ylim(0.,spectra[index][:right_edge].max()*1.2)\n axarr[iy,ix].annotate(all_filt[index],xy=(0.05,0.9),xytext=(0.05,0.9),verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20, xycoords='axes fraction')\n title='Longitudinal background Up/Down'.format(object_name)\n plt.suptitle(title,size=16)", "def imshow(self, title=None, aspect='auto', **kw):\n\n xlabel = 'Sample'\n ylabel = 'Detector number'\n image = super(Tod, self).imshow(mask=self.mask, title=title,\n origin='upper', xlabel=xlabel,\n ylabel=ylabel, aspect=aspect, **kw)\n return image", "def overlay_prob(image, mask, cutoff=0.5):\n if len(image.shape) == 3:\n image = image[: ,: ,0]\n if len(mask.shape) == 3:\n mask = mask[: ,: ,0]\n if np.amax(image) > 100:\n image = image /255\n\n mask = mask>=cutoff\n mask = mask.astype(int)\n masked = np.ma.masked_where(mask == 0, mask)\n\n plt.figure()\n plt.subplot(1, 2, 1)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.subplot(1, 2, 2)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.imshow(masked, 'jet', interpolation='nearest', alpha=0.5)\n plt.show()", "def imshow_surface(self):\n plt.imshow(self.z)\n 
plt.colorbar()\n plt.show()", "def paintMaskOptions(self): \n buttonMask = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_UNDO), self.buttonTooltips[\"undo\"], self.showTooltip, self.removeTooltip)\n buttonMask.topleft = 30, 500\n buttonMask.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.changeMask, \"mask\")\n self.window.add_child(buttonMask)\n self.activeWidget.append(buttonMask)\n buttonFileChooser = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_FILE), self.buttonTooltips[\"file\"], self.showTooltip, self.removeTooltip)\n buttonFileChooser.topleft = 150, 500\n buttonFileChooser.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.changeMask, \"file\")\n self.window.add_child(buttonFileChooser)\n self.activeWidget.append(buttonFileChooser)", "def get_mask(self, methods=None): \r\n plot_mask = np.ones(self.ndetects, np.bool_)\r\n rec_tr = self.rec_track\r\n\r\n for nm,method in enumerate(methods):\r\n method_mask = rec_tr[method] == 1\r\n plot_mask = np.multiply(~method_mask, plot_mask)\r\n\r\n return plot_mask", "def visualize_cam(mask, img, alpha=1.0):\n heatmap = (255 * mask.squeeze()).type(torch.uint8).cpu().numpy()\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = torch.from_numpy(heatmap).permute(2, 0, 1).float().div(255)\n b, g, r = heatmap.split(1)\n heatmap = torch.cat([r, g, b]) * alpha\n\n result = heatmap+img.cpu()\n result = result.div(result.max()).squeeze()\n\n return heatmap, result", "def get_masked_scene(orig, mask, local_context_size = 80, dilation=False):\n orig_scene = orig.copy()\n mask_scene = mask.copy()\n orig_scene_no_mask = orig.copy()\n \n mask_info = np.where(mask_scene == 0) \n min_x = max(min(mask_info[0]) - local_context_size, 0)\n max_x = max(mask_info[0]) + local_context_size\n min_y = max(min(mask_info[1]) - local_context_size, 0)\n max_y = max(mask_info[1]) + local_context_size\n \n orig_scene = orig_scene[min_x:max_x,min_y:max_y]\n orig_scene_no_mask = orig_scene_no_mask[min_x:max_x,min_y:max_y]\n mask_scene = mask_scene[min_x:max_x,min_y:max_y]\n \n dialation_mask = np.zeros(mask_scene.shape) + 255\n \n if dilation:\n dialation_mask = cv2.dilate(255-mask_scene, np.ones((local_context_size,local_context_size)))\n \n #implot(dialation_mask)\n #plt.imshow(dialation_mask, 'gray')\n \n for x in range(mask_scene.shape[0]):\n for y in range(mask_scene.shape[1]):\n if mask_scene[x, y] == 0:\n orig_scene[x, y, :] = 0\n orig_scene_no_mask[x,y,:] = 0\n if dilation:\n if dialation_mask[x,y] == 0:\n orig_scene[x, y, :] = 0\n \n return orig_scene, mask_scene, orig_scene_no_mask, dialation_mask", "def show_field(self, vehicles, type):\n\n # starting pixels x = 0, y = 0 on field image\n start_x = 78\n start_y = 45\n\n # block pixel width is slightly different per field size\n if self.size == 6:\n block_width = 72\n elif self.size == 9:\n block_width = 69\n elif self.size == 12:\n block_width = 68.5\n\n field = plt.imread(f\"data/RushHourImages/RushHour{self.size}.jpg\")\n fig, ax = plt.subplots()\n plt.imshow(field)\n plt.axis('off')\n\n for vehicle in vehicles:\n if vehicle.orientation == 'H':\n x = start_x + (vehicle.x * block_width)\n y = start_y + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car{vehicle.id}.png\")\n else:\n car = plt.imread(f\"data/RushHourImages/Truck{vehicle.id}.png\")\n\n # truck: the image coordinate is his middle, which changes with the length of the 
car\n x += 40\n\n if vehicle.orientation == 'V':\n x = start_y + (vehicle.x * block_width)\n y = start_x + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car-rotated{vehicle.id}.png\")\n else:\n car = plt.imread(f\"data/RushHourImages/Truck-rotated{vehicle.id}.png\")\n y += 40\n\n if self.size == 6:\n imagebox = OffsetImage(car, zoom=0.6)\n elif self.size == 9:\n imagebox = OffsetImage(car, zoom=0.4)\n elif self.size == 12:\n imagebox = OffsetImage(car, zoom=0.3)\n\n imagebox.image.axes = ax\n xy = (x, y)\n ab = AnnotationBbox(imagebox, xy, frameon=False)\n ax.add_artist(ab)\n\n if type == True:\n plt.show(block=False)\n plt.pause(0.001)\n plt.close()\n else:\n plt.show()", "def imshow(self, mask=None, title=None, new_figure=True, origin=None, **kw):\n\n if mask is None and self.coverage is not None:\n mask = self.coverage <= 0\n if mask is not None:\n data = np.array(self, copy=True)\n data[mask] = np.nan\n else:\n data = np.asarray(self)\n\n if origin is None:\n origin = self.origin\n\n # check if the map has no astrometry information\n if not self.has_wcs():\n if 'xlabel' not in kw:\n kw['xlabel'] = 'X'\n if 'ylabel' not in kw:\n kw['ylabel'] = 'Y'\n image = super(Map, self).imshow(title=title, new_figure=new_figure,\n origin=origin, **kw)\n return image\n\n fitsobj = kapteyn.maputils.FITSimage(externaldata=data,\n externalheader=self.header)\n if new_figure:\n fig = pyplot.figure()\n frame = fig.add_axes((0.1, 0.1, 0.8, 0.8))\n else:\n frame = pyplot.gca()\n if title is not None:\n frame.set_title(title)\n annim = fitsobj.Annotatedimage(frame, blankcolor='w')\n annim.Image(interpolation='nearest')\n grat = annim.Graticule()\n grat.setp_gratline(visible=False)\n annim.plot()\n annim.interact_imagecolors()\n annim.interact_toolbarinfo()\n annim.interact_writepos()\n pyplot.show()\n return annim", "def plot_img_and_mask_transformed(img, mask, img_tr, mask_tr):\n \n ## Using 4 columns for 4 images\n fig, axs = plt.subplots(ncols=4, figsize=(16, 4), sharex=True, sharey=True)\n axs[0].imshow(img)\n axs[1].imshow(mask[:, :, 0])\n axs[2].imshow(img_tr)\n axs[3].imshow(mask_tr[:, :, 0])\n #for ax in axs:\n # ax.set_xlim(0, input_size)\n # ax.axis('off')\n fig.tight_layout()\n plt.show()", "def visualize_model(self, ax):\n ax.imshow(self.w[1:].reshape(28, -1, order='F').T, cmap='bone')", "def plot_with_augmentation(image, mask, augment):\n augmented = augment(image=image, mask=mask)\n image_flipped = augmented[\"image\"]\n mask_flipped = augmented[\"mask\"]\n visualize(image_flipped, mask_flipped, original_image=image, original_mask=mask)", "def detect_and_draw_as_marker(self, image):\r\n # Required variables\r\n count = 0\r\n # convert to HSV.. 
so that we can filter out the image from our captured HSV values for our markers previously..\r\n HSVimg = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2HSV)\r\n # loop through all marker's HSV values\r\n for marker_HSV in self.markers_HSV:\r\n lower_boundary = np.array(marker_HSV[0])\r\n upper_boundary = np.array(marker_HSV[1])\r\n # Get the mask image that satisfies the lower and upper HSV values..\r\n maskImg = cv2.inRange(src=HSVimg, lowerb=lower_boundary, upperb=upper_boundary)\r\n\r\n '''Draw the contours for the mask image detected, marker point for the marker'''\r\n # Get the bounding box corners (In the function call to self.draw_contours(), contours are drawn to original camera feed, if self.debug_mode is set to 1)\r\n x, y, width, height = self.draw_contours(image, maskImg)\r\n if self.debug_mode:\r\n cv2.rectangle(img=image, pt1=(x, y), pt2=(x + width, y + height), color=(255, 0, 255), thickness=3)\r\n # Select the marker point..\r\n marker_point_center = (x + width // 2, y)\r\n # Draw the marker point..\r\n # cv2.circle(img=image, center=marker_point_center, radius=5, color=(2, 255, 10), thickness=cv2.FILLED)\r\n cv2.circle(img=image, center=marker_point_center, radius=5, color=list(self.marker_colors[count]), thickness=cv2.FILLED)\r\n\r\n # Append the trace point of marker..\r\n self.marker_path_points.append([marker_point_center, count])\r\n #print(count, end=\"\\n\")\r\n count += 1", "def get_regions_mask(self, input):", "def apply_mask(image, masks):\n if not masks:\n return\n hide = [m for m in masks if not m[\"visible\"]]\n show = [m for m in masks if m[\"visible\"]]\n if show:\n # Assume something is masked, unless it's shown.\n mask = np.ones(image.shape[:2], dtype=\"uint8\")\n for m in show:\n cv2.drawContours(\n mask,\n contours=[m[\"contour\"]],\n contourIdx=-1,\n color=0,\n thickness=-1,\n )\n else:\n # Assume something is unmasked, unless it's hidden.\n mask = np.zeros(image.shape[:2], dtype=\"uint8\")\n for m in hide:\n cv2.drawContours(\n mask,\n contours=[m[\"contour\"]],\n contourIdx=-1,\n color=255,\n thickness=cv2.FILLED,\n )\n image[mask > 0] = 0", "def addNonBarrelBlue(self, event):\n # let user draw second ROI\n ROI = RoiPoly(color='r') #let user draw ROI\n plt.show(block=False)\n mask = ROI.get_mask(self.greyimg)\n mask = mask*2\n self.ROI += mask", "def detect_sea_lions_in_image(filename,\n model,\n patch_h,\n patch_w,\n resize_image_patch_to_h, \n resize_image_patch_to_w,\n resize_mask_patch_to_h,\n resize_mask_patch_to_w,\n display_mask=False):\n\n train_image = cv2.imread(filename)\n image_patches_list = dhap.slice_the_image_into_patches(train_image, patch_h, patch_w)\n\n # Recombine the image from the patches (train_image.shape != image.shape)\n # bacause the size of the image is adjusted to be a multiple of patch_h and patch_w. \n image = dhap.combine_pathes_into_image(image_patches_list)\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n # Resize the patches to the ones used by the model.\n image_patches_list = dhap.resize_patches_in_patches_list(image_patches_list, \n resize_image_patch_to_h, \n resize_image_patch_to_w)\n\n mask_patches_list = apply_model_to_image_patches_list(image_patches_list, model)\n\n # The model outputs a (1,n) vertor. 
Reshape it to a matrix.\n mask_patches_list = reshape_patches_list(mask_patches_list,\n resize_mask_patch_to_h,\n resize_mask_patch_to_w)\n\n mask_patches_list = resized_image_patches_list = dhap.resize_patches_in_patches_list(mask_patches_list, \n patch_h, \n patch_w)\n\n mask = dhap.combine_pathes_into_mask(mask_patches_list)\n\n image = dhap.apply_mask(image, mask)\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(mask)\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n\n\n print(mask_patches_list[0][0].shape)\n\n\n #combine_pathes_into_image(patches_list", "def displayContours(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"contour\") == \"1\":\n needleNode = slicer.mrmlScene.GetNodeByID(modelNode.GetAttribute(\"needleID\"))\n if needleNode != None:\n if needleNode.GetDisplayVisibility()==1:\n modelNode.SetDisplayVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked)-1))\n d = modelNode.GetDisplayNode()\n d.SetSliceIntersectionVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked)-1))", "def display_multispectral_image(data, mask, sl, scale = 300):\n fig, axes = plt.subplots(2,2, figsize=(10,10))\n ax = axes.ravel()\n for k, ch in enumerate(chn_names):\n ax[k].imshow(data[:, :, sl, k].T + 300*mask[:,:,sl].T, cmap='gray', origin='lower')\n ax[k].set_title(ch)\n ax[k].set(xlabel=\"\")\n ax[k].axis('off')\n plt.suptitle('The multispectral MRI slice %d with the ROI mask' % sl) \n plt.tight_layout\n plt.show()", "def botStack(self):\r\n\r\n self.z_stack=self.img.shape[0]-1\r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaleds(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def plot_jump_flags_image(self):\n dm = datamodels.MIRIRampModel(self.jump_file)\n fig, axs = plt.subplots(1, 1, figsize=(8, 8))\n\n group_dq = np.squeeze(dm.groupdq, axis=0)\n group_dq = np.sum(group_dq, axis=0)\n\n axs.imshow(group_dq, cmap='gray', interpolation='nearest', origin='lower', vmin=2, vmax=4)\n\n plt.tight_layout()\n\n plot_name = os.path.join(self.output_dir, 'jump_flags.pdf')\n try:\n os.remove(plot_name)\n except:\n pass\n\n fig.savefig(plot_name, dpi=100)", "def mask(self, byclass, height, width, margin=0, figsize=(10, 10), dpi=180):\n # make ~binary mask using available classes\n style = {cls: ('k', '-') for cls in byclass}\n fig = Figure(figsize=figsize)\n fig.tight_layout(pad=0)\n fig.subplots_adjust(hspace=0, wspace=0, left=0, right=1, bottom=0, top=1)\n canvas = FigureCanvas(fig)\n ax = fig.subplots(1, 1)\n self.show_style(ax, style, byclass)\n ax.set_xlim(0 - margin, height + margin)\n ax.set_ylim(0 - margin, width + margin)\n canvas.draw()\n mask = self.figure_buffer(fig, dpi=dpi)\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n # fill in the gaps via:\n # https://www.learnopencv.com/filling-holes-in-an-image-using-opencv-python-c/\n _, thresholded = cv2.threshold(mask, 220, 255, cv2.THRESH_BINARY_INV);\n floodfilled = thresholded.copy()\n h, w = 
thresholded.shape[:2]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(floodfilled, mask, (0, 0), 255);\n mask = cv2.bitwise_not(thresholded | cv2.bitwise_not(floodfilled))\n return mask", "def draw_ui(img, vehicle_count, capacity, exit_masks=[]):\r\n # EXIT MASKS\r\n cv2.line(img, (0,400),(645,400),(0,255,0),2)\r\n cv2.line(img, (732,590),(1280,500),(0,255,0),2)\r\n \r\n # STATS\r\n cv2.rectangle(img, (0, 0), (img.shape[1], 50), (0, 0, 0), cv2.FILLED)\r\n cv2.putText(img, (\"Density: {cur_left}% Vehicles passed: {left} \\\r\n Density: {cur_right}% Vehicles passed: {right}\".format(\\\r\n left=vehicle_count[0], right=vehicle_count[1], \\\r\n cur_right=round(capacity[1]*100,3),\\\r\n cur_left=round(capacity[0]*100,3))), (30, 30),\\\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1)\r\n \r\n return img", "def visualize_model(self, ax):\n rs = self.w[1:,:].reshape(28, 28, 10, order='F')\n rs2 = np.transpose(rs, axes=[1,0,2])\n ax.imshow(rs2.reshape(28, -1, order='F'), cmap='bone')", "def visualize(scores, faces):\n pc_min, pc_max = np.min(scores, 0), np.max(scores, 0)\n pc_scaled = (scores - pc_min) / (pc_max - pc_min) \n fig, ax = plt.subplots()\n for i in range(len(faces)):\n imagebox = offsetbox.OffsetImage(faces[i, :].reshape(64,64).T, cmap=plt.cm.gray, zoom=0.5)\n box = offsetbox.AnnotationBbox(imagebox, pc_scaled[i, 0:2])\n ax.add_artist(box)\n plt.show()", "def draw_boxes_cars(img, vehicles_instance):\n\n for car_number in range(1, vehicles_instance.number_of_found_cars+1):\n # Find pixels with each car_number label value\n nonzero = (vehicles_instance.binary_map == car_number).nonzero()\n\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n\n return img", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img", "def draw_overlay(self):\n pass", "def data_assemble(self, x,y, r_cut, add_mask=5, pick_choice=False):\n #segmentation components\n obj_masks,center_mask_info, segments_deblend_list = self._seg_image(x, y, r_cut=r_cut)\n data_masks_center, _, xcenter, ycenter, c_index = center_mask_info\n image = self.cut_image(x,y,r_cut)\n self.raw_image = image\n src_mask = np.zeros_like(image)\n lens_mask = np.zeros_like(image)\n plu_mask = np.zeros_like(image)\n lenslight_mask_index = []\n if self.segmap is not None and self.interaction:\n segmap=self.segmap[0].data\n segdata = segmap[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n plt.imshow(segdata, origin='lower')\n nlabel = np.unique(segdata)\n for i in range(nlabel.shape[0] - 1):\n ax = (int((np.where(segdata == nlabel[i + 1])[0].max() - np.where(segdata == nlabel[i + 1])[0].min()) / 2 +\n np.where(segdata == nlabel[i + 1])[0].min()))\n ay = (int((np.where(segdata == nlabel[i + 1])[1].max() - np.where(segdata == nlabel[i + 1])[1].min()) / 3 +\n np.where(segdata == nlabel[i + 1])[1].min()))\n plt.text(ay, ax, repr(nlabel[i + 1]), color='r', 
fontsize=15)\n plt.title('Input segmentation map')\n plt.show()\n source_mask_index = [int(sidex) for sidex in input('Selection of data via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + segdata*(segdata==i*1)\n # lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + segdata*(segdata==i*1))\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + segdata*(segdata==i*1))\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n\n\n\n if self.segmap is None and self.interaction:\n self.plot_segmentation(image, segments_deblend_list, xcenter, ycenter, c_index)\n #source light\n if pick_choice:\n source_mask_index = [int(sidex) for sidex in input('Selection of data via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + obj_masks[i]\n #lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + obj_masks[i])\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? 
(y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + obj_masks[i])\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n else:\n src_mask = data_masks_center\n\n\n #adding pixels around the selected masks\n selem = np.ones((add_mask, add_mask))\n src_mask = ndimage.binary_dilation(src_mask.astype(np.bool), selem)\n plu_mask_out = ndimage.binary_dilation(plu_mask.astype(np.bool), selem)\n plu_mask_out = (plu_mask_out - 1)*-1\n\n #select source region to fit, or to use whole observation to fit\n ##1.select source region to fit\n snr = self.snr\n source_mask = image * src_mask\n #create background image for picked\n if self.background_rms is None:\n _, _, std = sigma_clipped_stats(image, sigma=snr, mask=source_mask)\n tshape = image.shape\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n else:\n tshape = image.shape\n std=np.mean(self.background_rms)\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n\n no_source_mask = (src_mask * -1 + 1) * img_bkg\n picked_data = source_mask + no_source_mask\n\n ##2.use whole observation to fit while mask out the contamination\n maskedimg = image * plu_mask_out\n\n ##orginize the output 'kwargs_data'\n kwargs_data = {}\n if pick_choice:\n kwargs_data['image_data'] = picked_data#select source region to fit\n else:\n kwargs_data['image_data'] = maskedimg#use whole observation to fit while mask out the contamination\n\n if self.background_rms is None:\n kwargs_data['background_rms'] = std\n self.background_rms = std\n else:\n kwargs_data['background_rms'] = np.mean(self.background_rms)\n kwargs_data['exposure_time'] = self.exp_time\n kwargs_data['transform_pix2angle'] = np.array([[1, 0], [0, 1]]) * self.deltaPix\n ra_at_xy_0 = (y - r_cut) * self.deltaPix # (ra,dec) is (y_img,x_img)\n dec_at_xy_0 = (x - r_cut) * self.deltaPix\n kwargs_data['ra_at_xy_0'] = ra_at_xy_0\n kwargs_data['dec_at_xy_0'] = dec_at_xy_0\n\n #coordinate of the lens light\n xlenlight, ylenlight = [], []\n if lenslight_mask_index !=[]:\n for i in lenslight_mask_index:\n xlenlight.append(ra_at_xy_0 + int(xcenter[i]) * self.deltaPix )\n ylenlight.append(dec_at_xy_0 + int(ycenter[i])* self.deltaPix )\n\n #for output\n self.data = kwargs_data['image_data']\n self.kwargs_data = kwargs_data\n self.data_mask = src_mask\n self.lens_mask = lens_mask\n self.plu_mask = plu_mask_out\n self.obj_masks = obj_masks\n imageData = ImageData(**kwargs_data)\n self.imageData = imageData\n kwargs_seg = [segments_deblend_list, xcenter, ycenter, c_index]\n\n return kwargs_data, kwargs_seg, [xlenlight, ylenlight]", "def makeMaskGui(img,snapRange=60,clim='auto'):\n if isinstance(img,str): img = read(img)\n mask = Mask(img)\n if clim == \"auto\": clim = np.percentile(img,(2,98))\n ans='ok'\n while (ans != 'done'):\n plt.imshow(img)\n plt.clim(clim)\n plt.imshow(mask.getMatplotlibMask())\n plt.pause(0.01)\n ans = input(\"What's next p/P/c/C/r/R/done? 
(capitals = subtract)\")\n if ans == \"c\":\n print(\"Adding circle, click on center then another point to define radius\")\n vertices = getPoints(N=2,shape=img.shape,snapRange=snapRange)\n mask.addCircle(*vertices)\n if ans == \"C\":\n print(\"Subtracting circle, click on center then another point to define radius\")\n vertices = getPoints(N=2,shape=img.shape,snapRange=snapRange)\n mask.subtractCircle(*vertices)\n if ans == \"r\":\n print(\"Adding rectangle, click on one corner and then on the opposite one\")\n vertices = getPoints(N=2,shape=img.shape,snapRange=snapRange)\n mask.addRectangle(*vertices)\n if ans == \"R\":\n print(\"Subtracting rectangle, click on one corner and then on the opposite one\")\n vertices = getPoints(N=2,shape=img.shape,snapRange=snapRange)\n mask.subtractRectangle(*vertices)\n if ans == 'p':\n print(\"Adding polygon\")\n vertices = getPoints(N=-1,shape=img.shape,snapRange=snapRange)\n mask.addPolygon(*vertices)\n if ans == 'P':\n print(\"Subtracting polygon\")\n vertices = getPoints(N=-1,shape=img.shape,snapRange=snapRange)\n mask.subtractPolygon(*vertices)\n\n plt.imshow(mask.getMatplotlibMask())\n plt.pause(0.01)\n fname = input(\"Enter a valid filename (ext .edf or .npy) if you want to save the mask (empty otherwise)\")\n try:\n if fname != '':\n ext = os.path.splitext(fname)[1]\n if ext == '.edf':\n mask.save(fname)\n elif ext == '.npy':\n np.save(fname,mask.getMask())\n except Exception as e:\n log.error(\"Error in saving mask\")\n log.error(e)\n finally:\n return mask", "def MakeVenetianBlinds(self):\r\n\r\n amount = 128\r\n size = self.GetClientSize()\r\n region = wx.Region(0, 0, size.x, 1)\r\n\r\n for y in xrange(size.y):\r\n\r\n # Reverse the order of the bottom 4 bits\r\n j = (y & 8 and [1] or [0])[0] | (y & 4 and [2] or [0])[0] | \\\r\n (y & 2 and [4] or [0])[0] | (y & 1 and [8] or [0])[0]\r\n \r\n if 16*j+8 < amount:\r\n region.Union(0, y, size.x, 1)\r\n \r\n self.SetShape(region)", "def topStack(self):\r\n\r\n self.z_stack=0\r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def get_plot(sample):\n scale = (CANVAS_DIM/PATCH_DIM)\n ego_pose = sample[0]\n map_mask = sample[2]\n\n fig, ax = plt.subplots()\n ax.set_ylim([0, CANVAS_DIM]) # set the bounds to be 10, 10\n ax.set_xlim([0, CANVAS_DIM])\n ax.imshow(map_mask[0])\n\n for vehicle in sample[1]:\n plot_vehicle(ax, vehicle, ego_pose, scale)\n\n plt.show()", "def detect(self, img, thresh_hold, show=False):\n heat = np.zeros_like(img[:,:,0]).astype(np.float)\n # heatmap for all boxes found in last n_frames frame\n heat = add_heat(heat,self.boxes)\n # threshold for multiple frames\n thresh = thresh_hold*len(self.lengths)\n # initialization\n if len(self.lengths) == 0:\n thresh = thresh_hold\n heat = apply_threshold(heat,thresh)\n\n # Visualize the heatmap when displaying\n heatmap = np.clip(heat, 0, 255)\n # Find final boxes from heatmap using label function\n labels = label(heatmap)\n draw_img = draw_labeled_bboxes(np.copy(img), labels)\n if show:\n plot_dual(draw_img, heatmap,'Car positions','Heat Map',cm2='hot')\n return draw_img", "def show_red_mask(img, mask):\n img_ = img\n mask_ = np.bool_(mask)\n red = img_[:, :, 0]\n green = img_[:, :, 1]\n blue = img_[:, :, 2]\n red[mask_] = 255\n green[mask_] 
= 0\n blue[mask_] = 0\n return img_", "def hideIsoSurfaces(self):\n #research\n profprint()\n contourNode = slicer.util.getNode(self.contourNode)\n widget = slicer.modules.NeedleFinderWidget\n if contourNode != None:\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked()-1))\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked()-1))", "def filterBankPatch(img, width=5):\n half = width / 2 # e.g. for 5, it's 2\n imgE = Views.extendBorder(img)\n ops = [offset(imgE, [x, y]) for x in xrange(-half, half + 1) for y in xrange(-half, half + 1)]\n return ops", "def find_cars(img, scale):\n img_boxes = [] # Clears img_boxes so we don't keep unwanted heatmap history\n count = 0\n draw_img = np.copy(img)\n\n # Make a heatmap of zeros\n heatmap = np.zeros_like(img[:, :, 0])\n\n # IMPORTANT : reading *.jpeg's (scaled 0-255, aka scaling needed), but\n # # trained on *.png's (scaled 0-1, aka scaling not needed)\n if img.dtype == 'uint8':\n img = img.astype(np.float32) / 255 # aka scaling needed\n\n img_tosearch = img[ystart:ystop, :, :]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n\n if scale != 1: # resize whole image instead of separate windows\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))\n\n ch1 = ctrans_tosearch[:, :, 0]\n ch2 = ctrans_tosearch[:, :, 1]\n ch3 = ctrans_tosearch[:, :, 2]\n\n # Define blocks and steps as above\n # These hold the number of HOG cells\n nxblocks = (ch1.shape[1] // pix_per_cell) - 1 # Note : '//' causes integers to be result, instead of floats\n nyblocks = (ch1.shape[0] // pix_per_cell) - 1\n # How many features per block are we going to be extracting\n nfeat_per_block = orient * cell_per_block ** 2\n window = 64\n nblocks_per_window = (window // pix_per_cell) - 1\n # aka 75% overlap between cells\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n count += 1\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get colour features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(\n np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))\n test_prediction = svc.predict((test_features))\n\n if test_prediction == 1:\n xbox_left = np.int(xleft * scale)\n 
ytop_draw = np.int(ytop * scale)\n win_draw = np.int(window * scale)\n cv2.rectangle(draw_img, (xbox_left, ytop_draw + ystart),\n (xbox_left + win_draw, ytop_draw + win_draw + ystart), (0, 0, 255))\n img_boxes.append(\n ((xbox_left, ytop_draw + ystart), (xbox_left + win_draw, ytop_draw + win_draw + ystart)))\n heatmap[ytop_draw + ystart:ytop_draw + win_draw + ystart, xbox_left:xbox_left + win_draw] += 1\n\n return draw_img, img_boxes, heatmap", "def _show(self, a):\n fig = plt.figure()\n fig.set_size_inches((2, 2))\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n plt.set_cmap('hot')\n ax.imshow(a, aspect='equal')\n plt.show()", "def draw_face_landmarks(data):\n head = extract_head(data);\n landmarks = data['position_data']['face_landmarks'][data['i']];\n face_box = data['position_data']['face_box'][data['i']];\n if face_box is not None:\n for p in landmarks:\n cv2.circle(head,p,3,(255,0,0));\n else:\n for p in landmarks:\n cv2.circle(head,p,3,(255,0,200));\n return head;", "def draw_windtunnel_border(ax):\n x_min = 0\n x_max = 1\n z_min = 0\n z_max = 0.254\n y_min = -0.127\n y_max = 0.127\n draw_rectangular_prism(ax, x_min, x_max, y_min, y_max, z_min, z_max)\n plt.draw()", "def visualize_in_scan(self, verbose=True):\n images = self.scan.load_all_dicom_images(verbose)\n \n # Preload contours and sort them by z pos.\n contours = sorted(self.contours, key=lambda c: c.image_z_position)\n fnames = self.scan.sorted_dicom_file_names.split(',')\n index_of_contour = [fnames.index(c.dicom_file_name) for c in contours]\n\n fig = plt.figure(figsize=(16,8))\n\n min_slice = min(index_of_contour)\n max_slice = max(index_of_contour)\n current_slice = min_slice\n\n ax_image = fig.add_axes([0.5,0.0,0.5,1.0])\n img = ax_image.imshow(images[current_slice].pixel_array,\n cmap=plt.cm.gray)\n\n contour_lines = []\n # We draw all the contours initally and set the visibility\n # to False. 
This works better than trying create and destroy\n # plots every time we update the image.\n for i,c in enumerate(contours):\n arr = c.to_matrix()\n cc, = ax_image.plot(arr[:,0], arr[:,1], '-r')\n cc.set_visible(i==0) # Set the first contour visible.\n contour_lines.append( cc )\n ax_image.set_xlim(-0.5,511.5); ax_image.set_ylim(511.5,-0.5)\n ax_image.axis('off')\n \n # Add the scan info table\n ax_scan_info = fig.add_axes([0.1, 0.8, 0.3, 0.1])\n ax_scan_info.set_axis_bgcolor('w')\n scan_info_table = ax_scan_info.table(\n cellText=[\n ['Patient ID:', self.scan.patient_id],\n ['Slice thickness:', '%.3f mm' % self.scan.slice_thickness],\n ['Pixel spacing:', '%.3f mm'%self.scan.pixel_spacing]\n ],\n loc='center', cellLoc='left'\n )\n # Remove the cell borders.\n # It Seems like there should be an easier way to do this...\n for cell in scan_info_table.properties()['child_artists']:\n cell.set_color('w')\n\n ax_scan_info.set_title('Scan Info')\n ax_scan_info.set_xticks([])\n ax_scan_info.set_yticks([])\n\n # Add annotations / characteristics table.\n ax_annotation_info = fig.add_axes([0.1, 0.45, 0.3, 0.25])\n ax_annotation_info.set_axis_bgcolor('w')\n\n # Create the rows to be displayed in the annotations table.\n cell_text = []\n for c in _all_characteristics_:\n row = []\n cname = c.capitalize()\n if cname.startswith('Int'):\n cname = 'InternalStructure'\n\n row.append(cname)\n row.append(getattr(self,cname)())\n row.append(getattr(self,c))\n\n cell_text.append(row)\n\n annotation_info_table = ax_annotation_info.table(\n cellText=cell_text,\n loc='center', cellLoc='left', colWidths=[0.45,0.45,0.1]\n )\n\n # Again, remove cell borders.\n for cell in annotation_info_table.properties()['child_artists']:\n cell.set_color('w')\n\n ax_annotation_info.set_title('Annotation Info')\n ax_annotation_info.set_xticks([])\n ax_annotation_info.set_yticks([])\n\n # Add the checkbox for turning contours on / off.\n ax_contour_checkbox = fig.add_axes([0.1, 0.25, 0.1, 0.15])\n ax_contour_checkbox.set_axis_bgcolor('w')\n contour_checkbox = CheckButtons(ax_contour_checkbox,\n ('Show Contours',), (True,))\n contour_checkbox.is_checked = True\n\n # Add the widgets.\n ax_slice = fig.add_axes([0.1, 0.1, 0.3, 0.05])\n ax_slice.set_axis_bgcolor('w')\n txt = 'Z: %.3f'%float(images[current_slice].ImagePositionPatient[-1]) \n sslice = Slider(ax_slice,\n txt,\n 0,\n len(images)-1,\n valinit=current_slice,\n valfmt=u'Slice: %d')\n\n def update(_):\n # Update image itself.\n current_slice = int(sslice.val)\n img.set_data(images[current_slice].pixel_array)\n txt='Z: %.3f'%float(images[current_slice].ImagePositionPatient[-1])\n sslice.label.set_text(txt)\n if contour_checkbox.is_checked:\n for i,c in enumerate(contour_lines):\n flag = (index_of_contour[i] == current_slice)\n flag = flag and (current_slice >= min_slice)\n flag = flag and (current_slice <= max_slice)\n # Set contour visible if flag is True.\n c.set_visible(flag)\n else:\n for c in contour_lines: c.set_visible(False)\n fig.canvas.draw_idle()\n\n def update_contours(_):\n contour_checkbox.is_checked = not contour_checkbox.is_checked\n update(None) # update requires an argument.\n\n sslice.on_changed(update)\n contour_checkbox.on_clicked(update_contours)\n\n plt.show()", "def vis_segmentation(image, seg_map):\n plt.figure(figsize=(15, 5))\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n\n plt.subplot(grid_spec[0])\n plt.imshow(image)\n plt.axis('off')\n plt.title('input image')\n\n plt.subplot(grid_spec[1])\n seg_image = 
label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.title('segmentation map')\n\n plt.subplot(grid_spec[2])\n plt.imshow(image)\n plt.imshow(seg_image, alpha=0.7)\n plt.axis('off')\n plt.title('segmentation overlay')\n\n unique_labels = np.unique(seg_map)\n ax = plt.subplot(grid_spec[3])\n plt.imshow(\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n ax.yaxis.tick_right()\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n plt.xticks([], [])\n ax.tick_params(width=0.0)\n plt.grid('off')\n plt.show()", "def hide(self):\n self.geometry(\"%dx%d%+d%+d\" % (0, 0, 0, 0))", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def hideIsoSurfaces(self):\r\n # research\r\n profprint()\r\n contourNode = slicer.util.getNode(self.contourNode)\r\n widget = slicer.modules.NeedleFinderWidget\r\n if contourNode != None:\r\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked() - 1))\r\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked() - 1))", "def ShowOneContourCutBKG(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-100\n YMAX=100\n \n figname='contourCutBKG_{}_{}.pdf'.format(all_filt[index],index)\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-10:y0+10,:]=0\n reduc_image=full_image[y0+YMIN:y0+YMAX,x0:spec_index_max]/all_expo[index]\n \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n 
#grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n cs=plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image ,50, colors='white', linewidth=.001,origin='lower') \n \n \n cbar = plt.colorbar(cs) \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX*0.8,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def detail_mask(clip: vs.VideoNode,\n sigma: float = 1.0, rxsigma: List[int] = [50, 200, 350],\n pf_sigma: Optional[float] = 1.0, brz: Tuple[int, int] = (2500, 4500),\n rg_mode: int = 17) -> vs.VideoNode:\n bits, clip = _get_bits(clip)\n\n clip_y = get_y(clip)\n pf = core.bilateral.Gaussian(clip_y, sigma=pf_sigma) if pf_sigma else clip_y\n ret = core.retinex.MSRCP(pf, sigma=rxsigma, upper_thr=0.005)\n\n blur_ret = core.bilateral.Gaussian(ret, sigma=sigma)\n blur_ret_diff = core.std.Expr([blur_ret, ret], \"x y -\")\n blur_ret_dfl = core.std.Deflate(blur_ret_diff)\n blur_ret_ifl = iterate(blur_ret_dfl, core.std.Inflate, 4)\n blur_ret_brz = core.std.Binarize(blur_ret_ifl, brz[0])\n blur_ret_brz = core.morpho.Close(blur_ret_brz, size=8)\n\n prewitt_mask = core.std.Prewitt(clip_y).std.Binarize(brz[1])\n prewitt_ifl = prewitt_mask.std.Deflate().std.Inflate()\n prewitt_brz = core.std.Binarize(prewitt_ifl, brz[1])\n prewitt_brz = core.morpho.Close(prewitt_brz, size=4)\n\n merged = core.std.Expr([blur_ret_brz, prewitt_brz], \"x y +\")\n rm_grain = core.rgvs.RemoveGrain(merged, rg_mode)\n return rm_grain if bits == 16 else depth(rm_grain, bits)", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n 
avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def overlayCard(src: np.array, card: np.array, mask:np.array, top_left: Point) -> Any:\r\n \r\n max_y = src.shape[0]\r\n # if top left is outside the src\r\n if top_left.y >= max_y:\r\n return\r\n\r\n # get the ROI\r\n rows, cols, _ = mask.shape\r\n roi = src[top_left.y:max_y, top_left.x:top_left.x+cols]\r\n roi_shape = roi.shape\r\n\r\n mask_grey = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\r\n _, mask = cv2.threshold(mask_grey, 127, 255, cv2.THRESH_BINARY)\r\n mask_inv = cv2.bitwise_not(mask)\r\n\r\n img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv[:roi_shape[0], :roi_shape[1]])\r\n card_fg = cv2.bitwise_and(card, card, mask=mask)\r\n\r\n dst = cv2.add(img_bg, card_fg[:roi_shape[0], :roi_shape[1]])\r\n src[top_left.y:max_y, top_left.x:top_left.x+cols] = dst", "def bokeh_telluric_mask(fig, wl, I, mask_limit=0.9, fill_alpha=0.2, fill_color='red'):\n wl_mask = I < mask_limit\n mean_step = np.mean([wl[1]-wl[0], wl[-1]-wl[-2]]) # Average nominal step size\n starts, ends = mask_edges(wl[wl_mask], mean_step)\n Boxes = [BoxAnnotation(plot=fig, left=start, right= end, fill_alpha=fill_alpha, fill_color=fill_color) for start, end in zip(starts, ends)]\n fig.renderers.extend(Boxes)", "def vis_segmentation(image, seg_map):\r\n plt.figure(figsize=(15, 5))\r\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\r\n\r\n plt.subplot(grid_spec[0])\r\n plt.imshow(image)\r\n plt.axis('off')\r\n plt.title('input image')\r\n\r\n plt.subplot(grid_spec[1])\r\n 
seg_image = label_to_color_image(seg_map).astype(np.uint8)\r\n plt.imshow(seg_image)\r\n plt.axis('off')\r\n plt.title('segmentation map')\r\n\r\n plt.subplot(grid_spec[2])\r\n plt.imshow(image)\r\n plt.imshow(seg_image, alpha=0.7)\r\n plt.axis('off')\r\n plt.title('segmentation overlay')\r\n\r\n unique_labels = np.unique(seg_map)\r\n ax = plt.subplot(grid_spec[3])\r\n plt.imshow(\r\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\r\n ax.yaxis.tick_right()\r\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\r\n plt.xticks([], [])\r\n ax.tick_params(width=0.0)\r\n plt.grid('off')\r\n plt.show()", "def vis_segmentation(image, seg_map):\n plt.figure(figsize=(15, 5))\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n\n plt.subplot(grid_spec[0])\n plt.imshow(image)\n plt.axis('off')\n plt.title('input image')\n\n plt.subplot(grid_spec[1])\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.title('segmentation map')\n\n plt.subplot(grid_spec[2])\n plt.imshow(image)\n plt.imshow(seg_image, alpha=0.7)\n plt.axis('off')\n plt.title('segmentation overlay')\n\n unique_labels = np.unique(seg_map)\n ax = plt.subplot(grid_spec[3])\n plt.imshow(\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n ax.yaxis.tick_right()\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n plt.xticks([], [])\n ax.tick_params(width=0.0)\n plt.grid('off')\n plt.show()", "def get_mask(self):\n w, h = self.rect.w, self.rect.h\n colorkey = (0, 0, 0)\n surface = pg.Surface((w, h))\n surface.set_colorkey(colorkey)\n # fill the surface with the spherical object\n color, center, radius = (255, 255, 255), self.rect.center, round(self.rect.w/2)\n pg.draw.circle(surface, color, center, radius)\n mask = pg.mask.from_surface(surface)\n return mask", "def __init__(self,obrazok):\n self.obrazok = pygame.transform.smoothscale(obrazok,(int(obrazok.get_width()/g.scaleObrazovky)+1,int(obrazok.get_height()/g.scaleObrazovky)+1))#+1 aby som mys nescalol na 0\n self.mask = pygame.mask.from_surface(self.obrazok)\n self.x = 0\n self.y = 0", "def drawCars(self):\n for car in self.cars:\n if car.aliveForFrames >= ALIVE_THRESHOLD:\n msg = 'ID: {0:>2}\\n'.format(car.id)\n msg += 'conf:{0:.2f}%\\n'.format(car.displayedConfidence * 100)\n msg += 'active: {} frames'.format(car.aliveForFrames - car.aliveForFrames % 5)\n self.car_detector.draw_boxes(self.image, tuple(car.box), msg)\n\n return self.image", "def __init__(self):\n DetectLandmarks.__init__(self)\n self.red_l = 0\n self.green_l = 0\n self.blue_l = 0\n self.red_e = 0\n self.green_e = 0\n self.blue_e = 0\n self.debug = 0\n self.image = 0\n self.width = 0\n self.height = 0\n self.im_copy = 0\n self.lip_x = []\n self.lip_y = []", "def image_mask(image, patch_R, patch_C, seg_model):\n\n im = Image.open(image)\n im_name = os.path.basename(image).split('.')[0]\n im_width, im_height = im.width, im.height\n\n N = patch_R // patch_C\n\n W_ps_NI = im_width // patch_C # 31782 // 256 = 124\n # W_ps_NR = slide_width % patch_C # 31782 % 256 = 38\n H_ps_NI = im_height // patch_R # 24529 // 1024 = 23\n # H_ps_NR = slide_height % patch_R # 24529 % 1024 = 977\n\n cell_ratio = 0.85 # the threshold that decide the patch is background or not\n\n output_dir = os.path.join(current_path, \"..\", \"output\", \"output_mask\")\n if not os.path.isdir(output_dir): os.makedirs(output_dir)\n\n np_im = np.array(im)[:, :, 0:3] # exclude alpha\n for w in range(W_ps_NI):\n 
for h in range(H_ps_NI):\n subHIC = np_im[h * patch_R: (h+1) * patch_R, w * patch_C:(w+1) * patch_C, :]\n\n # rgb three channels value that >200 and <40 are ignored segment\n rgb_s = (abs(subHIC[:, :, 0] - 120) >= 80) & (abs(subHIC[:, :, 1] - 120) >= 80) & (\n abs(subHIC[:, :, 2] - 120) >= 80) # >200 <40\n\n if np.sum(rgb_s) <= (patch_R * patch_C) * cell_ratio:\n # segment\n subHIC = np.where(rgb_similarity(subHIC, 15, 195), 250, subHIC)\n # adjust equalization histogram and adjust brightness\n for k in range(subHIC.shape[2]):\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(N * 4, 4))\n subHIC[:, :, k] = clahe.apply(subHIC[:, :, k])\n subHIC = exposure.adjust_gamma(subHIC, gamma=1.5)\n subHIC = subHIC.reshape(N, patch_C, patch_C, 3)\n\n subHIC = subHIC.reshape(N, patch_C, patch_C, 3)\n allmask_prob_list = maskrcnn_detection(seg_model, subHIC)\n\n for i in range(len(allmask_prob_list)):\n for layer in range(allmask_prob_list[i].shape[2]):\n image, cnts, hierarchy = cv2.findContours(allmask_prob_list[i][:, :, layer],\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n np_im[h * patch_R + i * patch_C: h * patch_R + (i + 1) * patch_C, w * patch_C:(w + 1) * patch_C,\n :] = cv2.drawContours(np_im[h * patch_R + i*patch_C: h*patch_R+(i+1)*patch_C, w * patch_C:(w + 1) * patch_C, :],\n cnts, -1, (0, 255, 0), 1)\n\n # np_im[h * patch_R + i*patch_C: h*patch_R+(i+1)*patch_C, w * patch_C:(w + 1) * patch_C, :] = subHIC[i]\n\n # plt.savefig(os.path.join(output_dir, f\"{im_name}w{w}h{h}N{i}.png\"))\n\n io.imsave(os.path.join(output_dir, f\"{im_name}.png\"), np_im)", "def on_draw_over_image(self):", "def ShowOneContourBKG(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt):\n \n figname='contourBKG_{}_{}.pdf'.format(all_filt[index],index)\n \n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-100\n YMAX=100\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0+YMIN:y0+YMAX,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = 
Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n cs=plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n #C = plt.contour(X, Y, reduc_image ,10, colors='white', linewidth=.01,origin='lower')\n \n cbar = plt.colorbar(cs) \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX*0.8,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def draw_boxes(img, paths, exit_masks=[]):\r\n for path in paths:\r\n contour, centroid = path[-1][:2]\r\n # DONT DRAW IF VEHICLE EXITS\r\n if vehicle_exits(centroid, exit_masks): continue\r\n x, y, w, h = contour\r\n\r\n # DRAW RECTANGLE AND CIRCLE DENOTING THE BOUNDARY AND CENTROID OF VEHICLE\r\n cv2.rectangle(img, (x, y), (x + w - 1, y + h - 1),BOUNDING_BOX_COLOUR, 1)\r\n cv2.circle(img, centroid, 2, CENTROID_COLOUR, -1)\r\n return img", "def imshow(self):\n axes([0, 0, 1, 1], xticks=[], yticks=[])\n imshow(self.rgb_image())", "def show_trunk(height=2):\n for k in range(height):\n print(\"|\".center(GROUND_WIDTH))", "def visualize_predicted_and_true_segment_masks(rgb_imgs, xyz_imgs, seg_masks, label_imgs):\n N = rgb_imgs.shape[0]\n fig_index = 1\n for i in range(N):\n num_objs = max(np.unique(seg_masks[i,...]).max(), np.unique(label_imgs[i,...]).max()) + 1\n\n rgb = rgb_imgs[i].astype(np.uint8)\n \n depth = xyz_imgs[i,...,2]\n\n seg_mask_plot = util_.get_color_mask(seg_masks[i,...], nc=num_objs)\n gt_masks = util_.get_color_mask(label_imgs[i,...], nc=num_objs)\n\n images = [rgb, depth, seg_mask_plot, gt_masks]\n titles = [f'Image {i+1}', 'Depth',\n f\"Refined Masks. #objects: {np.unique(seg_masks[i,...]).shape[0]-1}\",\n f\"Ground Truth. #objects: {np.unique(label_imgs[i,...]).shape[0]-1}\"\n ]\n util_.subplotter(images, titles, fig_num=i+1)", "def test_display_methods_with_display_mode_tiled(img_3d_mni):\n display = plot_img(img_3d_mni, display_mode=\"tiled\")\n display.add_overlay(img_3d_mni, threshold=0)\n display.add_edges(img_3d_mni, color=\"c\")\n display.add_contours(\n img_3d_mni, contours=2, linewidth=4, colors=[\"limegreen\", \"yellow\"]\n )", "def plot_glide_depths(depths, mask_tag_filt):\n import numpy\n\n from . 
import plotutils\n\n fig, ax = plt.subplots()\n\n ax = plotutils.plot_noncontiguous(ax, depths, numpy.where(mask_tag_filt)[0])\n ax.invert_yaxis()\n\n plt.show()\n\n return None", "def visualize_2_panel(path: str, outfile: str, kernel: str, s_above=5):\n sns.set(style=\"white\", color_codes=True, font_scale=1)\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n fig.suptitle(SUB_TITLE, y=0.93)\n\n x, y = np.load('{}/meshgrids.npy'.format(path)) # coordinates\n sig = np.load('{}/sig_{}.npy'.format(path, kernel))\n data = np.load('{}/queried-data.npy'.format(path)).item()\n\n ra, dec, n_star = data[\"ra\"], data[\"dec\"], len(data[\"ra\"])\n extent = [x.min(), x.max(), y.min(), y.max()] # arg extent for imshow\n\n is_peak = sig > s_above\n mask = data[\"sig_{}\".format(kernel)] > s_above\n\n axes[0].plot(ra, dec, '.', c='deepskyblue', ms=0.5, alpha=0.5)\n axes[0].plot(ra[mask], dec[mask], '.', c='orange', ms=1)\n axes[0].set_title('%d stars' % n_star)\n axes[1].set_title('%s: sig > %0.1f= %d pixels' % (kernel, s_above, np.sum(is_peak)))\n\n for u in range(2):\n axes[u].imshow(is_peak, cmap='copper', vmin=-0.01, vmax=1.01,\n extent=extent, origin='lower')\n axes[u].tick_params(axis='both', which='both',\n labelleft=False, labelbottom=False)\n axes[u].set_xlim(axes[u].set_xlim()[::-1]) # flipping\n\n _filename = \"{}-{}.png\".format(outfile, kernel)\n plt.savefig(_filename, bbox_inches='tight', dpi=300)", "def __init__(self, g_impath, f_impath):\n self.image_g = cv2.imread(g_impath)\n assert self.image_g is not None\n if f_impath is None:\n self.image_f = self.image_g\n else:\n self.image_f = cv2.imread(f_impath)\n assert self.image_f is not None\n self.f_path = f_impath\n self.g_path = g_impath\n self.mask = np.zeros_like(self.image_g)\n self.draw = False\n self.size = 5\n self.image_g_reset = self.image_g.copy()\n self.image_f_reset = self.image_f.copy()\n self.mask_reset = self.mask.copy()\n self.original_mask_copy = np.zeros(self.image_f.shape)\n self.window_name = \"Draw mask: s-save; r:reset; q:quit; l:larger painter; m:smaller painter\"\n self.window_name_move = \"Move mask: s-save; r:reset; q:quit;\"\n self.to_move = False\n self.move=False\n self.x0 = 0\n self.y0 = 0\n self.is_first = True\n self.xi = 0\n self.yi = 0", "def help():\n return \"\"\"\n MASK EDITOR\n [h] help\n [x] clear mask\n [+] increase shape size\n [-] decrease shape size\n [ ] next image (saves mask)\n [a] point mode (default)\n [s] line mode\n [f] flood fill\n CTRL paint mask\n SHFT unpaint mask\n ESC exit\n 1-10 label value\n\"\"\"", "def displayContours(self):\r\n # obsolete?\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n for modelNode in modelNodes.values():\r\n if modelNode.GetAttribute(\"contour\") == \"1\":\r\n needleNode = slicer.mrmlScene.GetNodeByID(modelNode.GetAttribute(\"needleID\"))\r\n if needleNode != None:\r\n if needleNode.GetDisplayVisibility() == 1:\r\n modelNode.SetDisplayVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked) - 1))\r\n d = modelNode.GetDisplayNode()\r\n d.SetSliceIntersectionVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked) - 1))" ]
[ "0.71640295", "0.67468154", "0.6488188", "0.63977045", "0.62672323", "0.6256299", "0.623426", "0.62271696", "0.6222103", "0.6217956", "0.6209555", "0.6123184", "0.6110045", "0.6090729", "0.6049397", "0.6028649", "0.59504765", "0.5926747", "0.5888969", "0.5847702", "0.5817907", "0.5813", "0.5771264", "0.5720918", "0.5686244", "0.5677598", "0.56713897", "0.5670608", "0.5657213", "0.5648365", "0.56425816", "0.56403637", "0.56229466", "0.5618992", "0.561174", "0.56076837", "0.56023836", "0.5594699", "0.5594606", "0.5585185", "0.55697626", "0.55669415", "0.55662423", "0.5552182", "0.5531507", "0.55283105", "0.552347", "0.5508857", "0.55040085", "0.55026144", "0.55000955", "0.54943705", "0.54662246", "0.5456645", "0.5445945", "0.5445773", "0.5443178", "0.54381156", "0.5436772", "0.54326344", "0.54323477", "0.54293907", "0.5427496", "0.5424575", "0.54237336", "0.5417853", "0.5415807", "0.5415209", "0.5414548", "0.5412997", "0.5405216", "0.5395652", "0.5391567", "0.5391284", "0.53853714", "0.53774744", "0.5377058", "0.53759193", "0.53722185", "0.5370714", "0.53700745", "0.5361292", "0.53603995", "0.5356421", "0.535374", "0.53532135", "0.53493124", "0.5342619", "0.53415716", "0.53410995", "0.53388065", "0.5336105", "0.5332823", "0.53292817", "0.53227127", "0.5321548", "0.53197616", "0.5318341", "0.53174657", "0.5316958", "0.5316598" ]
0.0
-1
Parse the first YAML document in a stream and produce the corresponding Python object.
def load(stream, schema=None): stream_type = str(type(stream)) if stream_type not in ("<type 'unicode'>", "<type 'str'>", "<class 'str'>"): raise TypeError("StrictYAML can only read a string of valid YAML.") document = ruamelyaml.load(stream, Loader=ruamelyaml.RoundTripLoader) # Document is single item (string, int, etc.) if type(document) not in (CommentedMap, CommentedSeq): document = stream for token in ruamelyaml.scan(stream): if type(token) == ruamelyaml.tokens.TagToken: raise exceptions.TagTokenDisallowed( document, token.start_mark.line + 1, token.end_mark.line + 1 ) if type(token) == ruamelyaml.tokens.FlowMappingStartToken: raise exceptions.FlowMappingDisallowed( document, token.start_mark.line + 1, token.end_mark.line + 1 ) if type(token) == ruamelyaml.tokens.AnchorToken: raise exceptions.AnchorTokenDisallowed( document, token.start_mark.line + 1, token.end_mark.line + 1 ) if schema is None: schema = Any() return schema(document)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self, stream):\n ret = yaml.load(stream)\n self.validate(ret)\n return (ret, self.make_order(ret))", "def construct_yaml_stream(in_stream):\n\n global _yaml_initialized\n\n logger.info('Request to construct yaml')\n\n if not _yaml_initialized:\n def _object_creator(loader, node, deep=True):\n mapping = {}\n for key_node, value_node in node.value:\n key = loader.construct_object(key_node, deep=deep)\n value = loader.construct_object(value_node, deep=deep)\n mapping[key] = value\n\n if '__factory__' in mapping:\n print('I am here')\n try:\n _cls = mapping['__factory__']\n del mapping['__factory__']\n logger.debug('__factory__ found in yaml, attempting to construct %s', _cls)\n\n # This line is used for referencing modules by a registered alias\n if type(_cls) == str:\n registrar_values = find_type(_cls)\n _cls = registrar_values['factory_method']\n default_args = registrar_values['default_values']\n mapping = {**default_args, **mapping}\n\n return _cls(**mapping)\n except Exception as e:\n logger.error('Failed to construct yaml object %s, %s', e, str(mapping))\n raise e\n\n return loader.construct_mapping(node, deep)\n\n logger.info(f'Registering yaml constructor for python !obj types')\n yaml.add_constructor('!obj', _object_creator, yaml.Loader)\n\n _yaml_initialized = True\n\n return yaml.load(in_stream)", "def load_yaml(yaml_stream):\n\n return yaml.safe_load(yaml_stream)", "def __ordered_load(self, stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n lambda loader, node: object_pairs_hook(loader.construct_pairs(node)))\n return yaml.load(stream, OrderedLoader)", "def parse(self, stream, media_type=None, parser_context=None):\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n decoded_stream = codecs.getreader(encoding)(stream)\n raw_body = decoded_stream.read()\n request = parser_context.get('request')\n setattr(request, 'raw_body', raw_body)\n filename = self.get_filename(stream, media_type, parser_context)\n if filename and (not filename.endswith('.toml') and not filename.endswith('.tml')):\n filename = f'{filename}.toml'\n setattr(request, 'filename', filename)\n return toml.loads(raw_body)", "def FromYAML(cls, source):\n\n # Late import to avoid a circular dependency.\n try:\n import bulletml.bulletyaml\n import yaml\n except ImportError:\n raise ParseError(\"PyYAML is not available\")\n else:\n try:\n return yaml.load(source)\n except Exception as exc:\n raise ParseError(str(exc))", "def parse_stream(stream):\n try:\n root = etree.parse(stream).getroot()\n except:\n root = etree.fromstring(stream)\n return parse_node(root)", "def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n\n OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)\n return yaml.load(stream, Loader=OrderedLoader)", "def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, OrderedLoader)", "def 
ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=AttrDict):\n class Ordered_Loader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n Ordered_Loader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, Ordered_Loader)", "def ordered_load(stream, Loader=yaml_Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, OrderedLoader)", "def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:\n return self.parse_stream_raw(stream, debug)", "def main(cls, **kwargs):\n try:\n import file_transformer\n except Exception as e:\n sys.exit(\"{}\\nSee https://github.com/benkehoe/file-transformer\".format(e))\n \n def loader(input_stream, args):\n return yaml.load(input_stream)\n \n def processor(input, args):\n transform = cls(input, vars(args))\n transform.apply()\n return transform.template\n \n def dumper(output, output_stream, args):\n yaml.dump(output, output_stream)\n \n return file_transformer.main(processor, loader, dumper, **kwargs)", "def Deserializer(stream_or_string, **options):\n if not isinstance(stream_or_string, (bytes, str)):\n stream_or_string = stream_or_string.read()\n if isinstance(stream_or_string, bytes):\n stream_or_string = stream_or_string.decode()\n try:\n objects = json.loads(stream_or_string)\n yield from PythonDeserializer(objects, **options)\n except (GeneratorExit, DeserializationError):\n raise\n except Exception as exc:\n raise DeserializationError() from exc", "def _open_yaml(stream, original_file=None, substitutions_dict={}):\n try:\n yaml_contents = yaml.load(stream, Loader=yaml_SafeLoader)\n\n return _get_yaml_contents_without_documentation_complete(yaml_contents, substitutions_dict)\n except DocumentationNotComplete as e:\n raise e\n except Exception as e:\n count = 0\n _file = original_file\n if not _file:\n _file = stream\n with open(_file, \"r\") as e_file:\n lines = e_file.readlines()\n for line in lines:\n count = count + 1\n if re.match(r\"^\\s*\\t+\\s*\", line):\n print(\"Exception while handling file: %s\" % _file, file=sys.stderr)\n print(\"TabIndentationError: Line %s contains tabs instead of spaces:\" % (count), file=sys.stderr)\n print(\"%s\\n\\n\" % repr(line.strip(\"\\n\")), file=sys.stderr)\n sys.exit(1)\n\n print(\"Exception while handling file: %s\" % _file, file=sys.stderr)\n raise e", "def yaml_ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n\n return yaml.load(stream, OrderedLoader)", "def read(self, stream):\n ret = json.load(stream)\n self.validate(ret)\n self.stringify(ret)\n return (ret, self.make_order(ret))", "def yaml_parse(yamlstr):\n try:\n # PyYAML doesn't support json as well as it should, so if the input\n # is actually just json it is better to parse it with the standard\n # json parser.\n return json.loads(yamlstr, object_pairs_hook=OrderedDict)\n except ValueError:\n loader = 
SafeLoaderWrapper\n loader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, \n _dict_constructor)\n loader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n return yaml.load(yamlstr, loader)", "def parse(self, stream, media_type=None, parser_context=None):\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n decoded_stream = codecs.getreader(encoding)(stream)\n return decoded_stream", "def parse(stream):\n return xsd_models.parseString(stream, silence=True)", "def from_yaml(cls, yml: str):\n\n return cls.from_dict(feast_yaml.yaml_loader(yml, load_single=True))", "def from_yaml(self, content):\r\n if yaml is None:\r\n raise UnsupportedDeserializationFormat(\"Usage of the YAML aspects requires yaml.\")\r\n\r\n return yaml.load(content, Loader=DeliciousCakeLoader)", "def get_protobuf(self, stream):\n obj = yaml.load(stream)\n\n robot = Robot()\n robot.id = obj.get('id', 0)\n robot.body.CopyFrom(self.body_decoder.decode(obj))\n robot.brain.CopyFrom(self.brain_decoder.decode(obj))\n return robot", "def from_content(cls, content: str) -> Any:\n cls._check_yaml()\n return yaml.safe_load(content)", "def parse(self, stream, media_type=None, parser_context=None):\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n\n try:\n decoded_stream = codecs.getreader(encoding)(stream)\n parse_constant = strict_constant if self.strict else None\n return ujson.load(decoded_stream, parse_constant=parse_constant)\n except ValueError as exc:\n raise ParseError('JSON parse error - %s' % str(exc))", "def yaml_to_python():\n\n some_string = \"\"\"\nname: Glen Jarvis\nsex: Male\ntitle: Senior Developer\nhp: [32, 71]\nsp: [1, 13]\ngold: 423\ninventory:\n - A laptop\n - Some code\n - A lot of hope\n\"\"\"\n\n some_python = yaml.load(some_string)\n\n print(\"YAML -> Python Example\")\n print(\"type(some_string): {0}\".format(type(some_string)))\n print(\"type(some_python): {0}\".format(type(some_python)))\n\n print(\"\\n\\nYAML (really string in Python):\")\n pprint.pprint(some_string)\n print(\"\\n\\nPython:\")\n pprint.pprint(some_python)", "def _deserialize(self):\n try:\n self._as_dict = yaml.load(self.path)\n except ScannerError as e:\n raise exc.ContentSerializeError(self, self.path, e.problem)", "def parse(self, stream, media_type=None, parser_context=None):\n raise NotImplementedError(\".parse() must be overridden.\")", "def load(cls, stream):\n data = json.load(stream)['ts']\n return cls(data['time'], data['inc'])", "def from_yaml(cls, b):\n return cls.from_dict(yaml.safe_load(b))", "def parser(stream, objconf, tuples, **kwargs):\n return map(kwargs[\"func\"], stream)", "def from_yaml(cls, path: str) -> \"EtlSettings\":\n with fsspec.open(path) as f:\n yaml_file = yaml.safe_load(f)\n return cls.parse_obj(yaml_file)", "def load(cls, data: TextIO) -> \"OpenAPI\":\n return cls(yaml.safe_load(data))", "def yaml_to_stream(data):\n yaml_stream = YamlStream()\n yaml.dump(data, yaml_stream, Dumper=yaml.Dumper)\n return yaml_stream", "def read(self, source_path):\n\n content = None\n metadata = dict()\n self._source_path = source_path\n with pelican_open(source_path) as text:\n metadata = yaml.load(text)\n\n # Turn these into expected objects\n # 'author': pelican.urlwrappers.Author object\n if 'author' in metadata:\n metadata['author'] = Author(metadata['author'], self.settings)\n # 'date' and 'modified': datetime.Date object (initially UTC)\n for key in ['date', 'modified']:\n if key in metadata:\n metadata[key] = 
dateutil.parser.parse(metadata[key])\n return content, metadata", "def load_yaml_guess_indent(stream, **kw):\n # type: (StreamTextType, Any) -> Any\n from .main import round_trip_load\n\n # load a YAML document, guess the indentation, if you use TABs you're on your own\n def leading_spaces(line):\n # type: (Any) -> int\n idx = 0\n while idx < len(line) and line[idx] == \" \":\n idx += 1\n return idx\n\n if isinstance(stream, text_type):\n yaml_str = stream # type: Any\n elif isinstance(stream, binary_type):\n # most likely, but the Reader checks BOM for this\n yaml_str = stream.decode(\"utf-8\")\n else:\n yaml_str = stream.read()\n map_indent = None\n indent = None # default if not found for some reason\n block_seq_indent = None\n prev_line_key_only = None\n key_indent = 0\n for line in yaml_str.splitlines():\n rline = line.rstrip()\n lline = rline.lstrip()\n if lline.startswith(\"- \"):\n l_s = leading_spaces(line)\n block_seq_indent = l_s - key_indent\n idx = l_s + 1\n while line[idx] == \" \": # this will end as we rstripped\n idx += 1\n if line[idx] == \"#\": # comment after -\n continue\n indent = idx - key_indent\n break\n if map_indent is None and prev_line_key_only is not None and rline:\n idx = 0\n while line[idx] in \" -\":\n idx += 1\n if idx > prev_line_key_only:\n map_indent = idx - prev_line_key_only\n if rline.endswith(\":\"):\n key_indent = leading_spaces(line)\n idx = 0\n while line[idx] == \" \": # this will end on ':'\n idx += 1\n prev_line_key_only = idx\n continue\n prev_line_key_only = None\n if indent is None and map_indent is not None:\n indent = map_indent\n return round_trip_load(yaml_str, **kw), indent, block_seq_indent", "def __init__ (self, stream):\n self.classes = [ ]\n self.functions = [ ]\n self.classDocs = [ ]\n\n if stream is not None:\n self.read(stream)", "def test_PhonopyYaml_read_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = _get_unitcell(fp)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def load(datastream):", "def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:\n tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)\n return self.parse_tokens(tokens, debug)", "def loadFromStream(self, stream, uri=None):\n self.loadFromDom(parseStream(stream))", "def _read(self, text):\n return yaml.safe_load(text)", "def _construct_seq(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:\n (obj,) = loader.construct_yaml_seq(node)\n return _add_reference(obj, loader, node)", "def parse(self, stream, media_type=None, parser_context=None):\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n\n try:\n data = stream.read().decode(encoding)\n return json.loads(data)\n except ValueError as exc:\n raise ParseError('JSON parse error - %s' % six.text_type(exc))", "def _stream(self, d):\n length = d['Length']\n token = self.read(6)\n if token != b'stream':\n self.on_parser_error(\"stream expected\")\n # `stream` keyword must be followed by CR+LF or by LF, but NOT by CR alone\n ch = self.next()\n if ch == CR:\n ch = self.next()\n if ch != LF:\n logging.warning(\"Missing LF after `stream` token - [CR]LF expected. 
Trying to proceed.\")\n self.prev()\n\n state = self.get_state()\n\n data = self.read(length)\n # According to the spec EOL should be after the data and before endstream\n # But some files do not follow this.\n #\n # See data/leesoil-cases-2.pdf\n #\n # self.eol()\n self.maybe_spaces()\n token = self.read(9)\n if token != b'endstream':\n # Work around wrong length. See https://github.com/maxpmaxp/pdfreader/issues/68\n err_state = self.get_state()\n logging.warning(\"Wrong stream length: {}. Trying to work around the issue.\".format(length))\n self.set_state(state)\n data = self.read(9)\n while not data.endswith(b'endstream'):\n ch = self.next()\n if ch is None:\n self.set_state(err_state)\n self.on_parser_error(\"endstream expected\")\n data += ch\n\n data = data[:-9]\n while data and data[-1:] in EOL:\n data = data[:-1]\n\n return Stream(d, data)", "def read(self, stream):\n self.inspect_quick(stream)\n pyffi.object_models.xml.struct_.StructBase.read(\n self, stream, self)\n\n # check if we are at the end of the file\n if stream.read(1):\n raise ValueError(\n 'end of file not reached: corrupt psk file?')", "def __call__(input_stream, config_variant=u\"\"):", "def _yaml_load(src):\n if not isinstance(src, str):\n try:\n src_name = src.name\n except AttributeError:\n src_name = '<yaml stringio>'\n # Force-load file streams as that allows the parser to print\n # much more context when it encounters an error\n src = src.read()\n else:\n src_name = '<yaml string>'\n try:\n return yaml.safe_load(src)\n except yaml.YAMLError:\n logging.error('Parser error when reading YAML from {}.'.format(src_name))\n raise", "def Convert(self, input_stream, output_stream):\n # For simpler processing just load the entire file into memory.\n input_lines = input_stream.readlines()\n input_line = 1\n\n # First extract pragmas, which must be placed at the top of the file.\n input_line = self._ExtractPragmas(input_line, input_lines, output_stream)\n\n # Now ignore any starting vertical whitespace.\n input_line = self._MoveToMain(input_line, input_lines, output_stream)\n\n # At the main text, begin processing.\n input_line = self._ProcessBody(input_line, input_lines, output_stream)\n\n # Done, but sanity check the amount of input processed.\n remaining_lines = len(input_lines) - input_line + 1\n if remaining_lines != 0:\n self._warning_method(\n input_line,\n u\"Processing completed, but not all lines were processed. 
\"\n \"Remaining lines: {0}.\".format(remaining_lines))", "def yaml_to_object(yaml_file):\n with open(yaml_file) as f:\n config = yaml_load(f, Loader=yaml_FullLoader)\n return dict_to_object(config)", "def from_yaml(cls, model: nn.Module, yaml_path: str) -> pl.LightningModule:\n with open(yaml_path, \"r\") as stream:\n kwargs = yaml.full_load(stream)\n\n return cls(model, **kwargs)", "def yaml_operation_parse(self, path_to_yaml, schema_name):\n\n # TODO: Add validation logic for YAML\n\n with open(path_to_yaml, 'r') as f:\n api_doc = yaml.load(f)\n\n self.tags = []\n self.summary = api_doc['summary']\n self.description = api_doc['description']\n if self.valid_content_type(api_doc['consumes']):\n self.consumes = api_doc['consumes']\n if self.valid_content_type(api_doc['produces']):\n self.produces = api_doc['produces']\n self.parameters = api_doc['parameters']\n self.responses = api_doc['responses']\n\n # TODO: Make sure all operation parameters have been filled with valid values\n\n self.yaml_operation_update(schema_name)", "def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"ToJson\":\n try:\n value = loader.construct_mapping(node, deep=True)\n except yaml.constructor.ConstructorError:\n value = loader.construct_sequence(node, deep=True)\n return cls(value)", "def load_yaml(content):\n from yaml import load, FullLoader\n return load(content, Loader=FullLoader)", "def from_yaml(\n cls,\n yml: str,\n defaults: Optional[bool]=False,\n path: Optional[str]=None,\n keys: Optional[str]=None) -> 'Parser':\n fname = Path(yml)\n if defaults:\n # load from 'ctwrap/defaults' database\n fname = Path(__file__).parents[0] / 'defaults' / fname\n elif path is not None:\n fname = Path(path) / fname\n\n try:\n _ = fname.is_file() # will raise error\n with open(fname) as stream:\n out = yaml.load(stream, Loader=yaml.SafeLoader)\n except OSError:\n out = yaml.load(yml, Loader=yaml.SafeLoader)\n\n if keys is None:\n return cls(out)\n\n return cls({k: out[k] for k in keys})", "def parse(self, stream, media_type=None, parser_context=None):\n if isinstance(stream, WSGIRequest):\n return oadr_20b.parseString(stream.body, silence=True)\n elif hasattr(stream, 'buf'):\n return oadr_20b.parseString(stream.buf, silence=True)\n\n return None", "def __init__(self, stream):\n self.stream = stream", "def __init__(self, stream):\n self.stream = stream", "def from_yaml(cls, y):\n return cls(yaml.load(y, AttrLoader))", "def from_path(cls, path: str) -> Any:\n cls._check_yaml()\n with open(path) as f:\n return yaml.safe_load(f)", "def FromDocument(cls, source):\n if not hasattr(source, 'read'):\n source = StringIO(source)\n start = source.read(1)\n source.seek(0)\n if start == \"<\":\n return cls.FromXML(source)\n elif start == \"!\" or start == \"#\":\n return cls.FromYAML(source)\n else:\n raise ParseError(\"unknown initial character %r\" % start)", "def from_yaml(c: Any, s: str, de: Type[Deserializer[str]] = YamlDeserializer, **opts: Any) -> Any:\n return from_dict(c, de.deserialize(s, **opts), reuse_instances=False)", "def load(cls, filename_or_stream: str | PathLike | TextIO) -> \"Experiment\":\n if isinstance(filename_or_stream, (str, PathLike)):\n p = Path(filename_or_stream)\n if not p.suffix:\n p = p.with_suffix(\".json\")\n s: TextIO = open(p, \"r\")\n close = True\n else:\n s = filename_or_stream\n close = False\n\n exp = cls._structure(json.load(s))\n if close:\n s.close()\n return exp", "def parse_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"YamlModifier\":\n return cls._from_yaml(loader, 
node)", "def run(self):\n args = self._parse_args(self._argv)\n with open(args['yaml']) as yaml_file:\n yaml_dict = yaml.safe_load(yaml_file) # returns list<dict>\n yaml_dict = yaml_dict[0]['machine_learning_setup']\n data = DataIngest(yaml_dict['data']).get()\n return PipelineWrapper(yaml_dict['pipeline']).fit_transform(data)", "def load_stream(source):\n raise NotImplementedError(\"not implemented yet\")", "def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"BotoError\":\n value = loader.construct_mapping(node, deep=True)\n return cls(value)", "def load_yaml(path):\n fsock = open(path)\n \n try:\n yaml_string = fsock.read()\n yaml_obj = yaml.load(yaml_string)\n \n finally:\n fsock.close()\n\n return yaml_obj", "def from_yaml(cls, path: str) -> \"Pipeline\":\n pipeline_configuration = PipelineConfiguration.from_yaml(path)\n\n return cls.from_config(pipeline_configuration)", "def load(text: str, options: Dict[str, str]) -> object:\n try:\n docs = list(yaml.safe_load_all(text)) # only safe features\n except yaml.YAMLError as e:\n raise LoadingError(\"Can't parse YAML\") from e # must use ValueError\n if len(docs) == 0:\n return {}\n if len(docs) == 1:\n return docs[0] # only one document\n return docs # leave as a list of documents", "def yaml_loads(value):\n return yaml.load(value)", "def load_config(stream):\n cnf = json.load(stream)\n return config.solve_version(cnf)", "def test_read_phonopy_yaml_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = read_phonopy_yaml(fp).unitcell\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def _load_datas(self) -> tp.Dict[str, dict]:\n with open(self._file, \"r\") as stream:\n try:\n load: tp.Dict[str, dict] = yaml.safe_load(stream)\n logger.info(\"YAML imported\")\n return load\n except yaml.YAMLError as exc:\n logger.debug(\"YAML import error : %s\", exc)\n raise", "def parse(self, stream, media_type=None, parser_context=None):\n\n parser_context = parser_context or {}\n request = parser_context['request']\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n meta = request.META\n upload_handlers = request.upload_handlers\n filename = self.get_filename(stream, media_type, parser_context)\n\n # Note that this code is extracted from Django's handling of\n # file uploads in MultiPartParser.\n content_type = meta.get('HTTP_CONTENT_TYPE',\n meta.get('CONTENT_TYPE', ''))\n try:\n content_length = int(meta.get('HTTP_CONTENT_LENGTH',\n meta.get('CONTENT_LENGTH', 0)))\n except (ValueError, TypeError):\n content_length = None\n\n if not filename:\n filename = 'autosave.zip'\n\n # See if the handler will want to take care of the parsing.\n for handler in upload_handlers:\n result = handler.handle_raw_input(None,\n meta,\n content_length,\n None,\n encoding)\n if result is not None:\n return DataAndFiles(None, {'file': result[1]})\n\n # This is the standard case.\n possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]\n chunk_size = min([2 ** 31 - 4] + possible_sizes)\n chunks = ChunkIter(stream, chunk_size)\n counters = [0] * len(upload_handlers)\n\n for handler in upload_handlers:\n try:\n handler.new_file(None, filename, content_type,\n content_length, encoding)\n except StopFutureHandlers:\n break\n\n for chunk in chunks:\n for i, handler in enumerate(upload_handlers):\n chunk_length = len(chunk)\n chunk = handler.receive_data_chunk(chunk, counters[i])\n counters[i] += chunk_length\n if chunk is None:\n break\n\n for i, handler in 
enumerate(upload_handlers):\n file_obj = handler.file_complete(counters[i])\n if file_obj:\n return DataAndFiles(None, {'file': file_obj})\n raise ParseError(\"FileUpload parse error - \"\n \"none of upload handlers can handle the stream\")", "def __init__(self, yaml_file_path: Path) -> None:\n with yaml_file_path.open(\"r\") as yaml_file:\n self._yaml = YAML().load(yaml_file.read())", "def load_yaml(input_path):\n yaml = ruamel.yaml.YAML()\n with open(input_path, 'rb') as input_file:\n return yaml.load(input_file)", "def from_yaml_string(cls, string):\n return cls(_yaml_load(string))", "def unpack(stream, **kwargs):\n if \"object_pairs_hook\" not in kwargs:\n object_hook = kwargs.get(\"object_hook\")\n for decoder in msgpack_decoders.get_all().values():\n object_hook = functools.partial(decoder, chain=object_hook)\n kwargs[\"object_hook\"] = object_hook\n return _unpack(stream, **kwargs)", "def unroll_stream(\n stream: Generator, skip_first: bool = False, pbar: Union[bool, Sequence[Any]] = True\n) -> Any:\n # init\n obs = next(stream)\n\n obs_flat, treedef = tree_flatten(obs)\n num_leaves = len(obs_flat)\n\n # stream_scan\n def _init_outputs():\n if skip_first:\n return [[]] * num_leaves\n else:\n return list(map(lambda x: [x], obs_flat))\n\n outputs = _init_outputs()\n\n if pbar:\n stream = tqdm(stream, desc=\"stream_unroll\")\n\n for obs in stream:\n obs_flat = tree_leaves(obs)\n assert len(obs_flat) == num_leaves\n for y, x in zip(outputs, obs_flat):\n y.append(x)\n\n # stack outputs\n for i in range(num_leaves):\n outputs[i] = onp.stack(outputs[i])\n\n # transpose outputs\n return tree_unflatten(treedef, outputs)", "def dangerous_load(text: str, options: Dict[str, str]) -> object:\n try:\n docs = list(yaml.full_load_all(text)) # load the full yaml\n except yaml.YAMLError as e:\n raise LoadingError(\"Can't parse YAML\") from e # must use ValueError\n if len(docs) == 0:\n return {}\n if len(docs) == 1:\n return docs[0] # only one document\n return docs # leave as a list of documents", "def _parse_collection(source):\n with text_stream(source) as f:\n coll_node = json.load(f)\n return coll_node, coll_node['documents']", "def load(cls, yaml_or_json):\n try:\n result = yaml.safe_load_all(yaml_or_json)\n except:\n try:\n result = json.loads(yaml_or_json)\n if isinstance(result, dict):\n result = (result for _ in range(1))\n except:\n result = None\n\n return result", "def test_event_pre_yaml_parse(self) -> None:\n\n @Event.PreYAMLParse.subscribe\n def hook(string: str) -> Optional[str]:\n return self.EXAMPLE_YAML_FILE\n\n assert Event.PreYAMLParse.validate()\n\n reference = self.EXAMPLE_ENTRY_DICT.copy()\n entries = YAMLParser().parse(\"Hello world!\")\n entry = list(entries.values())[0]\n assert entry.data == reference", "def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yamal_file, ImportLoader)", "def from_yaml_string(cls, yaml_string: Text, check_params=False):\n Params._check_yaml_import()\n import yaml\n\n lparams = yaml.safe_load(yaml_string)\n if check_params:\n return cls(**lparams)\n else:\n return cls.from_dict(lparams, return_instance=True, return_unused=False)", "def _unserialize(text):\n return yaml.safe_load(text)", "def __init__(self, stream, reader_schema=None):\n self.stream = stream\n self.reader_schema = (\n normalize_schema(reader_schema) if reader_schema else None\n )\n\n self._read_header()\n\n # Verify `codec`\n if self.codec == 'snappy' and not snappy:\n 
raise ValueError(\n \"Cannot read 'snappy' codec: 'snappy' module is not available\"\n )\n elif self.codec not in ('null', 'deflate'):\n raise ValueError('Unknown codec: %r' % self.codec)\n\n # Register the schema\n acquaint_schema(self.writer_schema)\n if reader_schema:\n populate_schema_defs(reader_schema)\n\n self._iterator = self._record_iterator()", "def parse_mapreduce_yaml(contents):\n try:\n builder = yaml_object.foo(MapReduceYaml)\n handler = yaml_builder.foo(builder)\n listener = yaml_listener.foo(handler)\n listener.foo(contents)\n\n mr_info = handler.foo()\n except (ValueError, yaml_errors.EventError), e:\n raise errors.foo(e)\n\n if foo(mr_info) < 1:\n raise errors.foo(\"No configs found in mapreduce.yaml\")\n if foo(mr_info) > 1:\n raise errors.foo(\"Found %d YAML documents\" %\n foo(mr_info))\n\n jobs = mr_info[0]\n job_names = foo(j.name for j in jobs.mapreduce)\n if foo(jobs.mapreduce) != foo(job_names):\n raise errors.foo(\n \"Overlapping mapreduce names; names must be unique\")\n\n return jobs", "def from_yaml(cls, yaml_string=None, filename=None, encoding='utf-8', errors='strict', loader=yaml.SafeLoader, **kwargs):\n bx_args = {}\n for arg in kwargs.copy():\n if arg in BOX_PARAMETERS:\n bx_args[arg] = kwargs.pop(arg)\n data = _from_yaml(yaml_string=yaml_string, filename=filename, encoding=encoding, errors=errors, Loader=loader, **kwargs)\n if not isinstance(data, dict):\n raise BoxError('yaml data not returned as a dictionarybut rather a {0}'.format(type(data).__name__))\n return cls(data, **bx_args)", "def _read_json_array_stream(stream):\n # Read till the beginning of the array ie. first '['\n while True:\n char = stream.read(1)\n if char == '[':\n break\n if char == '':\n return\n #\n buffer = ''\n braces = 0\n while True:\n char = stream.read(1)\n if char == '':\n return\n # Skip separators between objects.\n if char == ',' and braces == 0:\n continue\n # Store characters.\n buffer += char\n if char == '{':\n braces += 1\n elif char == '}':\n braces -= 1\n if braces == 0:\n yield json.loads(buffer)\n buffer = ''", "def loadseasoning(self):\n stream = open(self.fileref)\n self.config = yaml.safe_load(stream)\n stream.close()", "def read_stream(schema, stream, *, buffer_size=io.DEFAULT_BUFFER_SIZE):\n reader = _lancaster.Reader(schema)\n buf = stream.read(buffer_size)\n remainder = b''\n while len(buf) > 0:\n values, n = reader.read_seq(buf)\n yield from values\n remainder = buf[n:]\n buf = stream.read(buffer_size)\n if len(buf) > 0 and len(remainder) > 0:\n ba = bytearray()\n ba.extend(remainder)\n ba.extend(buf)\n buf = memoryview(ba).tobytes()\n if len(remainder) > 0:\n raise EOFError('{} bytes remaining but could not continue reading '\n 'from stream'.format(len(remainder)))", "def from_stream(stream):\n results = []\n for doc in stream:\n results.append(from_dict(data_class=SearchEntity, data=doc.to_dict()))\n return SearchResult(results)", "def readPipelines(pipelines=default_pipelines):\n with open(pipelines, 'r') as handle:\n return yaml.load(handle, Loader=yaml.FullLoader)", "def schemaless_reader(stream, schema):\n acquaint_schema(schema)\n return read_data(stream, schema)", "def from_msg(cls, msg):\n if cls._debug:\n log.debug('msg=%s', msg)\n key, seq_s, uuid, prop_s, body = msg\n key = key if key else None\n seq = struct.unpack('!q', seq_s)[0]\n body = body if body else None\n if body:\n body = pipeline.load(body)\n #body = json.loads(body_s)\n #prop = json.loads(prop_s)\n prop = pipeline.load(prop_s)\n return cls(seq, uuid=uuid, key=key, properties=prop, 
body=body)", "def read_stream(\n input_stream: IO[bytes], reader_name: Optional[str] = None, **reader_args: Any\n) -> Reader:\n if reader_name is None:\n reader_name = identify(input_stream)\n\n reader = plugins.call(\n package_name=__name__,\n plugin_name=reader_name,\n input_stream=input_stream,\n **reader_args,\n )\n reader.read()\n return reader", "def parse(self, source):\n command = 'pandoc ' + source + ' -t json'\n proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n res = proc.communicate()\n if res[1]:\n print('PROCESS FAILED. SEE BELOW:')\n print(str(res[1]))\n return None # sending stderr output to user\n else:\n document = json.loads(res[0])\n self.document_parse(document)\n return self.tables, self.text", "def read_yaml(yaml: Union[str, pathlib.Path, IO[Any]]) -> Component:\n c = Component()\n\n yaml = io.StringIO(yaml) if isinstance(yaml, str) and \"\\n\" in yaml else yaml\n conf = OmegaConf.load(yaml)\n\n for component_name in conf:\n component_conf = conf[component_name]\n component_type = component_conf[\"component\"]\n component_settings = component_conf[\"settings\"]\n ci = component_type2factory[component_type](**component_settings)\n component_properties = component_conf[\"properties\"]\n for k, v in component_properties.items():\n setattr(ci, k, v)\n ci.name = component_name\n c << ci\n return c" ]
[ "0.7405631", "0.6758382", "0.64881027", "0.6360592", "0.63516474", "0.63164014", "0.6199864", "0.606954", "0.604542", "0.60424024", "0.60077196", "0.5979247", "0.594493", "0.5858309", "0.5854879", "0.584637", "0.584421", "0.5775051", "0.5773475", "0.5768689", "0.57418305", "0.57371944", "0.5721698", "0.5704608", "0.56633794", "0.56129754", "0.560228", "0.5598455", "0.55818623", "0.5580482", "0.55780435", "0.5558158", "0.5557494", "0.55495363", "0.55161077", "0.55160624", "0.54892546", "0.5472689", "0.54483056", "0.5429306", "0.54023224", "0.5393973", "0.5349111", "0.5326969", "0.5313831", "0.5312805", "0.5298753", "0.5298201", "0.52963966", "0.528687", "0.5285067", "0.5284803", "0.5272822", "0.52578276", "0.52562916", "0.5244613", "0.5243025", "0.5243025", "0.52318674", "0.5224491", "0.52231675", "0.52115506", "0.5189172", "0.5176293", "0.5163061", "0.5156769", "0.515652", "0.5155162", "0.51526314", "0.515093", "0.5146148", "0.5126105", "0.51247233", "0.5117625", "0.509483", "0.50847137", "0.50728756", "0.50675666", "0.5065927", "0.5064336", "0.506259", "0.50141275", "0.49870688", "0.49848104", "0.49845734", "0.49744266", "0.49736676", "0.49659085", "0.4962423", "0.496013", "0.4955068", "0.49489376", "0.49477893", "0.494084", "0.49358052", "0.49191803", "0.49188396", "0.49178317", "0.4915278", "0.49144393" ]
0.6677856
2
Attention based temporal attention forward pass
def forward(self, src, src_t, seq, seq_t, seq_e, mask, vars_dict): src_ext = torch.unsqueeze(src, dim=1) # src [B, 1, D] src_e_ph = torch.zeros_like(src_ext) q = torch.cat([src_ext, src_e_ph, src_t], dim=2) # [B, 1, D + De + Dt] -> [B, 1, D] k = torch.cat([seq, seq_e, seq_t], dim=2) # [B, 1, D + De + Dt] -> [B, 1, D] mask = torch.unsqueeze(mask, dim=2) # mask [B, N, 1] mask = mask.permute([0, 2, 1]) # mask [B, 1, N] # # target-attention output, attn = self.multi_head_target(q=q, k=k, v=k, vars_dict=vars_dict, mask=mask) # output: [B, 1, D + Dt], attn: [B, 1, N] # print('src.shape', src.shape) output = output.squeeze(1) # print('output.shape', output.shape) # print('output', output) # print('output.squeeze().shape', output.shape) attn = attn.squeeze() # output = self.merger(output, src) x = torch.cat([output, src], dim=1) # x = self.layer_norm(x) x = F.relu(F.linear(x, vars_dict['w1_agg_fc'])) output = F.linear(x, vars_dict['w2_agg_fc']) return output, attn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, encoder_state, context, context_lens):\n attn = self.attention(context, encoder_state.squeeze(0), context_lens)\n return attn", "def _Attention(self, name, is_causal=True):\n p = self.params\n tr_atten_p = TransformerAttentionLayer.Params().Set(\n name='transformer_atten',\n input_dim=p.model_dim,\n hidden_dim=p.attention_hidden_dim or p.model_dim,\n is_masked=is_causal,\n num_heads=p.num_heads,\n residual_dropout_prob=p.residual_dropout_prob,\n atten_dropout_prob=p.atten_dropout_prob,\n fprop_dtype=p.fprop_dtype,\n add_unnormalized_input=p.selfatten_add_unnormalized_input,\n )\n tr_atten_p.atten_tpl.use_bias = p.use_bias\n tr_atten_p.atten_tpl.enable_value_proj = p.selfatten_enable_value_proj\n tr_atten_p.atten_tpl.enable_query_scale = p.enable_query_scale\n tr_atten_p.atten_tpl.enable_per_dim_scale = p.enable_per_dim_scale\n tr_atten_p.atten_tpl.device_mesh = p.device_mesh\n tr_atten_p.atten_tpl.weight_split_dims_mapping = (\n p.weight_split_dims_mapping.dnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.blnh = (\n p.activation_split_dims_mapping.blnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.bld = (\n p.activation_split_dims_mapping.bld)\n if p.deterministic_dropout:\n tr_atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n tr_atten_p.atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n\n return self._Graph(\n name,\n ['i'], # input NestedMap with {vec, paddings}\n ['o'], # output NestedMap with {vec, paddings}\n ('i.vec->split_i',\n self.MeshSplit('input_split', p.activation_split_dims_mapping.bld)),\n ('split_i,split_i,i.paddings->o.vec,unused_prob', tr_atten_p),\n ('i.paddings->o.paddings', self._Id('id')))", "def forward(self, graph, feat):\n graph = graph.local_var()\n\n if isinstance(feat, tuple):\n h_src = self.feat_drop(feat[0])\n h_dst = self.feat_drop(feat[1])\n feat_src = self.fc_src(h_src).view(-1, self._num_heads, self._out_feats)\n feat_dst = self.fc_dst(h_dst).view(-1, self._num_heads, self._out_feats)\n else:\n h_src = h_dst = self.feat_drop(feat)\n feat_src = feat_dst = self.fc(h_src).view(-1, self._num_heads,\n self._out_feats)\n el = (feat_src * self.attn_l).sum(dim=-1).unsqueeze(-1)\n er = (feat_dst * self.attn_r).sum(dim=-1).unsqueeze(-1)\n graph.srcdata.update({'ft': feat_src, 'el': el})\n graph.dstdata.update({'er': er})\n # compute edge attention, el and er are a_l Wh_i and a_r Wh_j respectively.\n graph.apply_edges(fn.u_add_v('el', 'er', 'e'))\n e = self.leaky_relu(graph.edata.pop('e'))\n\n one_minus_lambda_sparsemax = (1 - self.lambda_sparsemax)\n lambda_score = e / one_minus_lambda_sparsemax\n\n sparse_attn = edge_sparsemax(graph, lambda_score)\n graph.edata['a'] = self.attn_drop(sparse_attn)\n self.attention_weights = graph.edata['a']\n\n # self.att_wrt_h = compute_att_wrt_h(self.attention_weights, self.fc.weight)\n\n graph.update_all(fn.u_mul_e('ft', 'a', 'm'), fn.sum('m', 'ft'))\n rst = graph.dstdata['ft']\n\n # residual\n if self.res_fc is not None:\n resval = self.res_fc(h_dst).view(h_dst.shape[0], -1, self._out_feats)\n rst = rst + resval\n # activation\n if self.activation:\n rst = self.activation(rst)\n return rst", "def attention(query, step):\n \n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n query = array_ops.concat(query_list, 1)\n _tmp = math_ops.matmul(query, w) + b\n _tmp = array_ops.reshape(_tmp, [-1, 1, 1, attn_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(v * 
math_ops.tanh(hidden_features + _tmp), [2, 3])\n # beta = math_ops.multiply(nn_ops.softmax(s, name=\"beta_%d\" % step), beta_scalar)\n beta = nn_ops.softmax(s, name=\"beta_%d\" % step)\n # Now calculate the attention-weighted vector d.\n \n hidden_attn = math_ops.reduce_sum(array_ops.reshape(beta, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n return hidden_attn, beta", "def get_attention(hidden_state):\n inputs = tf.concat((hidden_state, processed_input[-1]), axis = 1)\n hidden_values = tf.nn.tanh(tf.matmul(inputs, Wa1) + ba1)\n e_values = (tf.matmul(hidden_values, Wa2) + ba2)\n return e_values", "def attention(inp, scope, e_dim, past, config):\n assert inp.shape.ndims == 3 # input should be of shape [batch, seqlen, embeddings] # [batch, sequence, features]\n assert e_dim % config.num_heads == 0 # embedding can be split in heads\n\n if past is not None:\n assert past.shape.ndims == 5 # [batch, 2, heads, seqlen, emebeddings]\n\n def split_heads(x):\n out = split_into_n_states(x, config.num_heads)\n out = tf.transpose(out, [0, 2, 1, 3])\n return out\n\n def merge_heads(x):\n out = merge_n_states(tf.transpose(x, [0, 2, 1, 3]))\n return out\n\n def mask_attention_weights(w):\n # w should have shape [batches, heads, dst_seq, src_seq], where information flows from scr to dst\n _, _, nd, ns = shapes_list(w)\n b = attention_mask(nd, ns, w.dtype)\n b = tf.reshape(b, [1, 1, nd, ns])\n w = w * b - tf.cast(1e10, w.dtype) * (1 - b)\n return w\n\n def multihead_attention(q, k, v):\n w = tf.matmul(q, k, transpose_b=True)\n w *= tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))\n\n # mask attention weights\n w = mask_attention_weights(w)\n w = softmax_with_reduce_max(w)\n out = tf.matmul(w, v)\n return out\n\n with tf.variable_scope(scope):\n c = conv1d(inp, 'convolutional_attention', e_dim * 3)\n q, k, v = map(split_heads, tf.split(c, 3, axis=2))\n present = tf.stack([k, v], axis=1)\n if past is not None:\n # there is a stack below it\n pk, pv = tf.unstack(past, axis=1)\n k = tf.concat([pk, k], axis=2)\n v = tf.concat([pv, v], axis=2)\n\n attn = multihead_attention(q, k, v)\n attn = merge_heads(attn)\n\n out = conv1d(attn, 'convolutional_projection', e_dim)\n return out, present", "def __init__(self, encoder_hidden_dim, decoder_hidden_dim, T):\n super(TemporalAttentionDecoder, self).__init__()\n\n self.T = T\n self.encoder_hidden_dim = encoder_hidden_dim\n self.decoder_hidden_dim = decoder_hidden_dim\n\n self.attn_layer = nn.Sequential(nn.Linear(2 * decoder_hidden_dim + encoder_hidden_dim, encoder_hidden_dim),\n nn.ReLU(), nn.Linear(encoder_hidden_dim, 1))\n self.lstm_layer = nn.LSTM(input_size=1, hidden_size=decoder_hidden_dim)\n self.fc = nn.Linear(encoder_hidden_dim + 2, 1)\n self.fc_final = nn.Linear(decoder_hidden_dim + encoder_hidden_dim, 1)\n\n self.fc.weight.data.normal_()", "def _attention(self, inputs):\n attn_weights = K.batch_dot(x=inputs,\n y=K.permute_dimensions(inputs,\n pattern=(0, 2, 1)))\n return K.permute_dimensions(attn_weights, (0, 2, 1))", "def temporal_affine_forward(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n cache = x, w, b, out\n return out, cache", "def _apply_attention_constraint(e,\n last_attended_idx,\n backward_window=1,\n forward_window=3):\n # for dygraph to static graph\n # if e.shape[0] != 1:\n # raise NotImplementedError(\n # \"Batch attention constraining is not yet supported.\")\n backward_idx = paddle.cast(\n last_attended_idx - backward_window, dtype='int64')\n forward_idx = paddle.cast(last_attended_idx + 
forward_window, dtype='int64')\n if backward_idx > 0:\n e[:, :backward_idx] = -float(\"inf\")\n if forward_idx < paddle.shape(e)[1]:\n e[:, forward_idx:] = -float(\"inf\")\n return e", "def __init__(self, input_dim=6, hidden_dim=128, T=10, in_features=2*128 + 16, num_layers=1, out_features=1):\n super(InputAttentionEncoder, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.T = T\n\n in_features = hidden_dim * 2 + T - 1\n\n self.lstm_unit = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim, num_layers=num_layers)\n self.attn_linear = nn.Linear(in_features=in_features, out_features=out_features)\n self.bn1 = nn.BatchNorm1d(out_features)\n self.fc2 = nn.Linear(out_features, out_features)", "def get_attention(self, X):\n if self.bn:\n layer = 16\n else:\n layer = 14\n inputs = [K.learning_phase()] + [self.model.inputs[0]]\n _attention_f = K.function(inputs, [\n self.model.layers[layer].output])\n \n return _attention_f([0] + [X])", "def forward(self, input):\n x = self.emb(input)\n output = self.attention(x)\n # print(\"Idggfghrf \", output.size())\n return output", "def forward(self, h_t, mem):\n\n # ==\n # Compute non-sparse attention\n cur_mem_size = mem.size(0)\n batch_size = mem.size(1)\n\n # Repeat h_t from (batch, hid_dim) to (mem_size, batch, hid_dim)\n # one for each memory slot\n h_repeated = (h_t.clone().unsqueeze(0)\n .repeat(cur_mem_size, 1, 1))\n\n # Concat to [h_t, memory], shape (mem_size, batch, hid_dim*2)\n mlp_h_attn = torch.cat((h_repeated, mem), dim=2)\n\n # (Optional) block past attention gradient\n if self.block_attn_grad_past:\n mlp_h_attn = mlp_h_attn.detach()\n\n # Compute attention via tanh then matmul with weight matrix\n # Weight matrix has shape (hid_dim * 2 , 1)\n # Output (attn_w) has shape (mem_size, batch, 1)\n mlp_h_attn = self.tanh(mlp_h_attn)\n attn_w = torch.matmul(mlp_h_attn, self.w_t)\n\n # ==\n # Sparsify attention: set top-k to non-zero and normalize\n attn_w = attn_w.view(cur_mem_size, batch_size)\n attn_w = self.sparse_attention(attn_w)\n attn_w = attn_w.view(cur_mem_size, batch_size, 1)\n\n # ==\n # Extract from memory using attention\n attn_w_rep = attn_w.repeat(1, 1, self.hidden_size)\n h_mem_w = attn_w_rep * mem # (mem_size, batch, hid_dim)\n\n # Attention-extracted memory information\n m_t = torch.sum(h_mem_w, dim=0) # (batch, hid_dim)\n\n return m_t", "def forward(\n self,\n x: torch.Tensor,\n self_attn_mask: torch.Tensor = None,\n self_attn_padding_mask: torch.Tensor = None,\n need_weights: bool = False,\n pos_bias=None\n ):\n residual = x\n\n if self.layer_norm_first:\n x = self.self_attn_layer_norm(x)\n x, attn, pos_bias = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask,\n position_bias=pos_bias\n )\n x = self.dropout1(x)\n x = residual + x\n\n residual = x\n x = self.final_layer_norm(x)\n if self.activation_name == \"glu\":\n x = self.fc1(x)\n else:\n x = self.activation_fn(self.fc1(x))\n x = self.dropout2(x)\n x = self.fc2(x)\n x = self.dropout3(x)\n x = residual + x\n else:\n x, attn, pos_bias = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n need_weights=need_weights,\n attn_mask=self_attn_mask,\n position_bias=pos_bias\n )\n\n x = self.dropout1(x)\n x = residual + x\n\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if self.activation_name == \"glu\":\n x = self.fc1(x)\n else:\n x = self.activation_fn(self.fc1(x))\n x = self.dropout2(x)\n x = self.fc2(x)\n x = 
self.dropout3(x)\n x = residual + x\n x = self.final_layer_norm(x)\n\n return x, attn, pos_bias", "def attention(query, use_attention=False):\n attn_weights = []\n ds = [] # Results of attention reads will be stored here.\n for i in xrange(num_heads):\n with variable_scope.variable_scope(\"Attention_%d\" % i):\n y = rnn_cell._linear(query, attention_vec_size, True)\n y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(\n v[i] * math_ops.tanh(hidden_features[i] + y), [2, 3])\n if use_attention is False: # apply mean pooling\n weights = tf.tile(sequence_length, tf.pack([attn_length]))\n weights = array_ops.reshape(weights, tf.shape(s))\n a = array_ops.ones(tf.shape(s), dtype=dtype) / math_ops.to_float(weights)\n # a = array_ops.ones(tf.shape(s), dtype=dtype) / math_ops.to_float(tf.shape(s)[1])\n else:\n a = nn_ops.softmax(s)\n attn_weights.append(a)\n # Now calculate the attention-weighted vector d.\n d = math_ops.reduce_sum(\n array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n ds.append(array_ops.reshape(d, [-1, attn_size]))\n return attn_weights, ds", "def compute_attention(t1, t2):\n dim = t1.shape.as_list()[2]\n init = tf.constant_initializer(1.0 / dim)\n\n t1_logits = ops.last_dim_weighted_sum(t1, \"t1_w\")\n t2_logits = ops.last_dim_weighted_sum(t2, \"t2_w\")\n\n dot_w = tf.get_variable(\n \"dot_w\", shape=dim, initializer=init, dtype=tf.float32)\n # Compute x * dot_weights first, then batch mult with x\n dots = t1 * tf.expand_dims(tf.expand_dims(dot_w, 0), 0)\n dot_logits = tf.matmul(dots, t2, transpose_b=True)\n\n return dot_logits + \\\n tf.expand_dims(t1_logits, 2) + \\\n tf.expand_dims(t2_logits, 1)", "def forward(self, queries, keys, mask=None, attn_prior=None, speaker_embed=None):\n if speaker_embed is not None:\n keys = keys + self.key_spk_proj(speaker_embed.unsqueeze(1).expand(\n -1, keys.shape[-1], -1\n )).transpose(1, 2)\n queries = queries + self.query_spk_proj(speaker_embed.unsqueeze(1).expand(\n -1, queries.shape[-1], -1\n )).transpose(1, 2)\n keys_enc = self.key_proj(keys) # B x n_attn_dims x T2\n queries_enc = self.query_proj(queries)\n\n # Simplistic Gaussian Isotopic Attention\n attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2 # B x n_attn_dims x T1 x T2\n attn = -self.temperature * attn.sum(1, keepdim=True)\n\n if attn_prior is not None:\n #print(f\"AlignmentEncoder \\t| mel: {queries.shape} phone: {keys.shape} mask: {mask.shape} attn: {attn.shape} attn_prior: {attn_prior.shape}\")\n attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + 1e-8)\n #print(f\"AlignmentEncoder \\t| After prior sum attn: {attn.shape}\")\n\n attn_logprob = attn.clone()\n\n if mask is not None:\n attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2), -float(\"inf\"))\n\n attn = self.softmax(attn) # softmax along T2\n return attn, attn_logprob", "def forward(Observation, Emission, Transition, Initial):\n\n N, M = Emission.shape\n T = Observation.shape[0]\n\n alpha = np.zeros((N, T))\n alpha[:, 0] = Initial.T * Emission[:, Observation[0]]\n\n for col in range(1, T):\n for row in range(N):\n aux = alpha[:, col - 1] * Transition[:, row]\n alpha[row, col] = np.sum(aux * Emission[row, Observation[col]])\n\n # P = np.sum(alpha[:, -1])\n\n return alpha", "def one_step(i_t, h_tm1):\n h_t = self.activation(T.dot(i_t, self.W) + T.dot(h_tm1, self.W_rec) + self.b)\n return h_t", "def convolutional_attention(inputs, filter_size=5, initializer=None,\n reuse=None, name=''):\n with 
tf.variable_scope('conv_att_{}'.format(name), reuse=reuse) as f:\n dim = inputs.get_shape().as_list()[2]\n filter_shape = filter_shape = [filter_size, dim, 1]\n W1 = tf.get_variable(\"weights\", filter_shape,\n initializer=initializer)\n b1 = tf.get_variable(\"bias\", [1],\n initializer=tf.constant_initializer([0.1]))\n conv = tf.nn.conv1d(inputs, W1, stride=1,\n padding=\"SAME\", data_format=\"NHWC\")\n # this should be bsz x seq_len x 1\n conv += b1\n att = tf.nn.sigmoid(conv)\n weighted_inputs = inputs * att\n return weighted_inputs, att", "def forward(self, state, encoder_padding_mask):\n residual = state.clone()\n\n '''\n ___QUESTION-6-DESCRIBE-D-START___\n What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor\n be after multi-head attention? HINT: formulate your answer in terms of\n constituent variables like batch_size, embed_dim etc...\n\n The purpose of encoder_padding_mask is to account for the fact that the\n source sentences in the batch are of different length. The output shape\n of state tensor will be [src_time_steps, batch_size, embed_dim].\n '''\n state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)\n '''\n ___QUESTION-6-DESCRIBE-D-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state", "def make_attention_lstm():\n from tensorflow.keras import activations\n from tensorflow.keras import backend as K\n from tensorflow.keras import constraints, initializers, regularizers\n\n # from keras.legacy import interfaces\n from tensorflow.keras.layers import RNN, InputSpec, Layer\n\n def _time_distributed_dense(\n x,\n w,\n b=None,\n dropout=None,\n input_dim=None,\n output_dim=None,\n timesteps=None,\n training=None,\n ):\n \"\"\"Apply `y . 
w + b` for every temporal slice y of x.\n\n # Arguments\n x: input tensor.\n w: weight matrix.\n b: optional bias vector.\n dropout: wether to apply dropout (same dropout mask\n for every temporal slice of the input).\n input_dim: integer; optional dimensionality of the input.\n output_dim: integer; optional dimensionality of the output.\n timesteps: integer; optional number of timesteps.\n training: training phase tensor or boolean.\n # Returns\n Output tensor.\n \"\"\"\n if not input_dim:\n input_dim = K.shape(x)[2]\n if not timesteps:\n timesteps = K.shape(x)[1]\n if not output_dim:\n output_dim = K.int_shape(w)[1]\n\n if dropout is not None and 0.0 < dropout < 1.0:\n # apply the same dropout pattern at every timestep\n ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))\n dropout_matrix = K.dropout(ones, dropout)\n expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)\n x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)\n\n # collapse time dimension and batch dimension together\n x = K.reshape(x, (-1, input_dim))\n x = K.dot(x, w)\n if b is not None:\n x = K.bias_add(x, b)\n # reshape to 3D tensor\n if K.backend() == \"tensorflow\":\n x = K.reshape(x, K.stack([-1, timesteps, output_dim]))\n x.set_shape([None, None, output_dim])\n else:\n x = K.reshape(x, (-1, timesteps, output_dim))\n return x\n\n class AttentionLSTMCell(Layer):\n \"\"\"Long-Short Term Memory unit - with Attention.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](keras/activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](keras/activations.md)).\n attention_activation: Activation function to use\n for the attention step. If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n (see [activations](keras/activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n attention_initializer: Initializer for the `attention_kernel` weights\n matrix, used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n use_chrono_initialization: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.]\n (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n attention_regularizer: Regularizer function applied to\n the `attention_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n attention_constraint: Constraint function applied to\n the `attention_kernel` weights matrix\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n return_attention: Returns the attention vector instead of\n the internal state.\n # References\n - [Long short-term memory]\n (http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)\n (original 1997 paper)\n - [Learning to forget: Continual prediction with LSTM]\n (http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)\n - [Supervised sequence labeling with recurrent neural networks]\n (http://www.cs.toronto.edu/~graves/preprint.pdf)\n - [A Theoretically Grounded Application of Dropout\n in Recurrent Neural Networks]\n (http://arxiv.org/abs/1512.05287)\n - [Bahdanau, Cho & Bengio (2014),\n \"Neural Machine Translation by Jointly Learning to Align and Translate\"]\n (https://arxiv.org/pdf/1409.0473.pdf)\n - [Xu, Ba, Kiros, Cho, Courville, Salakhutdinov, Zemel & Bengio (2016)\n \"Show, Attend and Tell: Neural Image Caption Generation\n with Visual Attention\"]\n (http://arxiv.org/pdf/1502.03044.pdf)\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n def __init__(\n self,\n units,\n activation=\"tanh\",\n recurrent_activation=\"hard_sigmoid\",\n 
attention_activation=\"tanh\",\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n recurrent_initializer=\"orthogonal\",\n attention_initializer=\"orthogonal\",\n bias_initializer=\"zeros\",\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n attention_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n attention_constraint=None,\n dropout=0.0,\n recurrent_dropout=0.0,\n return_attention=False,\n implementation=1,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.input_spec = [InputSpec(ndim=2)]\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.attention_activation = activations.get(attention_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.attention_initializer = initializers.get(attention_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.attention_regularizer = regularizers.get(attention_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.attention_constraint = constraints.get(attention_constraint)\n\n self.dropout = min(1.0, max(0.0, dropout))\n self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))\n self.return_attention = return_attention\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n self.implementation = implementation\n self.state_spec = [\n InputSpec(shape=(None, self.units)),\n InputSpec(shape=(None, self.units)),\n ]\n self.state_size = (self.units, self.units)\n\n def build(self, input_shape):\n \"\"\"Build the AttentionLSTMCell object.\"\"\"\n if hasattr(self, \"timesteps\") and self.timesteps is not None:\n self.timestep_dim = self.timesteps\n else:\n self.timestep_dim = 1 # input_shape[0]\n\n self.input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"kernel\",\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n )\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name=\"recurrent_kernel\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n # add attention kernel\n self.attention_kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"attention_kernel\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n # add attention weights\n # weights for attention model\n self.attention_weights = self.add_weight(\n shape=(self.input_dim, self.units),\n name=\"attention_W\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n self.attention_recurrent_weights = self.add_weight(\n 
shape=(self.units, self.units),\n name=\"attention_U\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n if self.use_bias:\n if self.unit_forget_bias:\n\n def bias_initializer(shape, *args, **kwargs):\n return K.concatenate(\n [\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.Ones()((self.units,), *args, **kwargs),\n self.bias_initializer(\n (self.units * 2,), *args, **kwargs\n ),\n ]\n )\n\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.units * 4,),\n name=\"bias\",\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_bias = self.add_weight(\n shape=(self.units,),\n name=\"attention_b\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_recurrent_bias = self.add_weight(\n shape=(self.units, 1),\n name=\"attention_v\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n else:\n self.bias = None\n self.attention_bias = None\n self.attention_recurrent_bias = None\n\n self.kernel_i = self.kernel[:, : self.units]\n self.kernel_f = self.kernel[:, self.units : self.units * 2]\n self.kernel_c = self.kernel[:, self.units * 2 : self.units * 3]\n self.kernel_o = self.kernel[:, self.units * 3 :]\n\n self.recurrent_kernel_i = self.recurrent_kernel[:, : self.units]\n self.recurrent_kernel_f = self.recurrent_kernel[\n :, self.units : self.units * 2\n ]\n self.recurrent_kernel_c = self.recurrent_kernel[\n :, self.units * 2 : self.units * 3\n ]\n self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3 :]\n\n self.attention_i = self.attention_kernel[:, : self.units]\n self.attention_f = self.attention_kernel[:, self.units : self.units * 2]\n self.attention_c = self.attention_kernel[:, self.units * 2 : self.units * 3]\n self.attention_o = self.attention_kernel[:, self.units * 3 :]\n\n if self.use_bias:\n self.bias_i = self.bias[: self.units]\n self.bias_f = self.bias[self.units : self.units * 2]\n self.bias_c = self.bias[self.units * 2 : self.units * 3]\n self.bias_o = self.bias[self.units * 3 :]\n else:\n self.bias_i = None\n self.bias_f = None\n self.bias_c = None\n self.bias_o = None\n\n self.built = True\n\n def _generate_dropout_mask(self, inputs, training=None):\n if 0 < self.dropout < 1:\n ones = K.ones_like(K.squeeze(inputs[:, 0:1, :], axis=1))\n\n def dropped_inputs():\n return K.dropout(ones, self.dropout)\n\n self._dropout_mask = [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(4)\n ]\n else:\n self._dropout_mask = None\n\n def _generate_recurrent_dropout_mask(self, inputs, training=None):\n if 0 < self.recurrent_dropout < 1:\n ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))\n ones = K.tile(ones, (1, self.units))\n\n def dropped_inputs():\n return K.dropout(ones, self.dropout)\n\n self._recurrent_dropout_mask = [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(4)\n ]\n else:\n self._recurrent_dropout_mask = None\n\n def call(self, inputs, states, training=None):\n \"\"\"Call the AttentionLSTMCell.\"\"\"\n # dropout matrices for input units\n dp_mask = self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n h_tm1 = states[0] # previous memory state\n c_tm1 = states[1] # previous carry state\n\n # 
alignment model\n h_att = K.repeat(h_tm1, self.timestep_dim)\n att = _time_distributed_dense(\n inputs,\n self.attention_weights,\n self.attention_bias,\n input_dim=self.input_dim,\n output_dim=self.units,\n timesteps=self.timestep_dim,\n )\n attention_ = self.attention_activation(\n K.dot(h_att, self.attention_recurrent_weights) + att\n ) # energy\n attention_ = K.squeeze(\n K.dot(attention_, self.attention_recurrent_bias), 2\n ) # energy\n\n alpha = K.exp(attention_)\n\n if dp_mask is not None:\n alpha *= dp_mask[0]\n\n alpha /= K.sum(alpha, axis=1, keepdims=True)\n alpha_r = K.repeat(alpha, self.input_dim)\n alpha_r = K.permute_dimensions(alpha_r, (0, 2, 1))\n\n # make context vector (soft attention after Bahdanau et al.)\n z_hat = inputs * alpha_r\n # context_sequence = z_hat\n z_hat = K.sum(z_hat, axis=1)\n\n if self.implementation == 1:\n if 0 < self.dropout < 1.0:\n inputs_i = inputs * dp_mask[0]\n inputs_f = inputs * dp_mask[1]\n inputs_c = inputs * dp_mask[2]\n inputs_o = inputs * dp_mask[3]\n else:\n inputs_i = inputs\n inputs_f = inputs\n inputs_c = inputs\n inputs_o = inputs\n x_i = K.dot(inputs_i, self.kernel_i)\n x_f = K.dot(inputs_f, self.kernel_f)\n x_c = K.dot(inputs_c, self.kernel_c)\n x_o = K.dot(inputs_o, self.kernel_o)\n if self.use_bias:\n x_i = K.bias_add(x_i, self.bias_i)\n x_f = K.bias_add(x_f, self.bias_f)\n x_c = K.bias_add(x_c, self.bias_c)\n x_o = K.bias_add(x_o, self.bias_o)\n\n if 0 < self.recurrent_dropout < 1.0:\n h_tm1_i = h_tm1 * rec_dp_mask[0]\n h_tm1_f = h_tm1 * rec_dp_mask[1]\n h_tm1_c = h_tm1 * rec_dp_mask[2]\n h_tm1_o = h_tm1 * rec_dp_mask[3]\n else:\n h_tm1_i = h_tm1\n h_tm1_f = h_tm1\n h_tm1_c = h_tm1\n h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i\n + K.dot(h_tm1_i, self.recurrent_kernel_i)\n + K.dot(z_hat, self.attention_i)\n )\n f = self.recurrent_activation(\n x_f\n + K.dot(h_tm1_f, self.recurrent_kernel_f)\n + K.dot(z_hat, self.attention_f)\n )\n c = f * c_tm1 + i * self.activation(\n x_c\n + K.dot(h_tm1_c, self.recurrent_kernel_c)\n + K.dot(z_hat, self.attention_c)\n )\n o = self.recurrent_activation(\n x_o\n + K.dot(h_tm1_o, self.recurrent_kernel_o)\n + K.dot(z_hat, self.attention_o)\n )\n else:\n if 0.0 < self.dropout < 1.0:\n inputs *= dp_mask[0]\n z = K.dot(inputs, self.kernel)\n if 0.0 < self.recurrent_dropout < 1.0:\n h_tm1 *= rec_dp_mask[0]\n z += K.dot(h_tm1, self.recurrent_kernel)\n z += K.dot(z_hat, self.attention_kernel)\n\n if self.use_bias:\n z = K.bias_add(z, self.bias)\n\n z0 = z[:, : self.units]\n z1 = z[:, self.units : 2 * self.units]\n z2 = z[:, 2 * self.units : 3 * self.units]\n z3 = z[:, 3 * self.units :]\n\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n\n h = o * self.activation(c)\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n h._uses_learning_phase = True\n return h, [h, c]\n\n class AttentionLSTM(RNN):\n \"\"\"Long-Short Term Memory unit - with Attention.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](keras/activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](keras/activations.md)).\n attention_activation: Activation function to use\n for the attention step. If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n (see [activations](keras/activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n attention_initializer: Initializer for the `attention_kernel` weights\n matrix, used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n use_chrono_initialization: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.]\n (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n attention_regularizer: Regularizer function applied to\n the `attention_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n attention_constraint: Constraint function applied to\n the `attention_kernel` weights matrix\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n return_attention: Returns the attention vector instead of\n the internal state.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n # References\n - [Long short-term memory]\n (http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)\n (original 1997 paper)\n - [Learning to forget: Continual prediction with LSTM]\n (http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)\n - [Supervised sequence labeling with recurrent neural networks]\n (http://www.cs.toronto.edu/~graves/preprint.pdf)\n - [A Theoretically Grounded Application of Dropout\n in Recurrent Neural Networks]\n (http://arxiv.org/abs/1512.05287)\n - [Bahdanau, Cho & Bengio (2014)\n \"Neural Machine Translation by Jointly Learning to Align and Translate\"]\n (https://arxiv.org/pdf/1409.0473.pdf)\n - [Xu, Ba, Kiros, Cho, Courville, Salakhutdinov, Zemel & Bengio (2016)\n \"Show, Attend and Tell: Neural Image Caption Generation\n with Visual Attention\"]\n (http://arxiv.org/pdf/1502.03044.pdf)\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n # '@interfaces.legacy_recurrent_support\n def __init__(\n self,\n units,\n activation=\"tanh\",\n recurrent_activation=\"hard_sigmoid\",\n attention_activation=\"tanh\",\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n recurrent_initializer=\"orthogonal\",\n attention_initializer=\"orthogonal\",\n bias_initializer=\"zeros\",\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n attention_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n attention_constraint=None,\n dropout=0.0,\n recurrent_dropout=0.0,\n implementation=1,\n return_sequences=False,\n return_state=False,\n return_attention=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs,\n ):\n import warnings\n\n if implementation == 0:\n warnings.warn(\n \"`implementation=0` has been deprecated, \"\n \"and now defaults to `implementation=1`.\"\n \"Please update your layer call.\",\n stacklevel=2,\n )\n implementation = 1\n\n if K.backend() == \"cntk\":\n if not kwargs.get(\"unroll\") and (dropout > 0 or recurrent_dropout > 0):\n warnings.warn(\n \"RNN dropout is not supported with the CNTK backend \"\n \"when using dynamic RNNs (i.e. non-unrolled). 
\"\n \"You can either set `unroll=True`, \"\n \"set `dropout` and `recurrent_dropout` to 0, \"\n \"or use a different backend.\",\n stacklevel=2,\n )\n dropout = 0.0\n recurrent_dropout = 0.0\n\n cell = AttentionLSTMCell(\n units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n attention_activation=attention_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n attention_initializer=attention_initializer,\n bias_initializer=bias_initializer,\n unit_forget_bias=unit_forget_bias,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n attention_regularizer=attention_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n attention_constraint=attention_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n return_attention=return_attention,\n implementation=implementation,\n )\n super().__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs,\n )\n self.return_attention = return_attention\n\n def build(self, input_shape):\n \"\"\"Build the AttentionLSTM object.\"\"\"\n self.cell.timesteps = input_shape[1]\n self.cell.build(input_shape)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n \"\"\"Call the AttentionLSTM object.\"\"\"\n self.cell._generate_dropout_mask(inputs, training=training)\n self.cell._generate_recurrent_dropout_mask(inputs, training=training)\n return super().call(\n inputs, mask=mask, training=training, initial_state=initial_state\n )\n\n @property\n def units(self):\n \"\"\"Return property units.\"\"\"\n return self.cell.units\n\n @property\n def activation(self):\n \"\"\"Return property activation.\"\"\"\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n \"\"\"Return property recurrent_activation.\"\"\"\n return self.cell.recurrent_activation\n\n @property\n def attention_activation(self):\n \"\"\"Return property attention_activation.\"\"\"\n return self.cell.attention_activation\n\n @property\n def use_bias(self):\n \"\"\"Return property use_bias.\"\"\"\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n \"\"\"Return property kernel_initializer.\"\"\"\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n \"\"\"Return property recurrent_initializer.\"\"\"\n return self.cell.recurrent_initializer\n\n @property\n def attention_initializer(self):\n \"\"\"Return property attention_initializer.\"\"\"\n return self.cell.attention_initializer\n\n @property\n def bias_initializer(self):\n \"\"\"Return property bias_initializer.\"\"\"\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n \"\"\"Return property unit_forget_bias.\"\"\"\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n \"\"\"Return property kernel_regularizer.\"\"\"\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n \"\"\"Return property recurrent_regularizer.\"\"\"\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n \"\"\"Return property bias_regularizer.\"\"\"\n return self.cell.bias_regularizer\n\n @property\n def activity_regularizer(self):\n \"\"\"Return property 
activity_regularizer.\"\"\"\n return self.cell.activity_regularizer\n\n @property\n def attention_regularizer(self):\n \"\"\"Return property attention_regularizer.\"\"\"\n return self.cell.attention_regularizer\n\n @property\n def kernel_constraint(self):\n \"\"\"Return property kernel_constraint.\"\"\"\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n \"\"\"Return property recurrent_constraint.\"\"\"\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n \"\"\"Return property bias_constraint.\"\"\"\n return self.cell.bias_constraint\n\n @property\n def attention_constraint(self):\n \"\"\"Return property attention_constraint.\"\"\"\n return self.cell.attention_constraint\n\n @property\n def dropout(self):\n \"\"\"Return property dropout.\"\"\"\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n \"\"\"Return property recurrent_dropout.\"\"\"\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n \"\"\"Return property implementation.\"\"\"\n return self.cell.implementation\n\n def get_config(self):\n \"\"\"Return configuration dict of the AttentionLSTM object.\"\"\"\n config = {\n \"units\": self.units,\n \"activation\": activations.serialize(self.activation),\n \"recurrent_activation\": activations.serialize(\n self.recurrent_activation\n ),\n \"attention_activation\": activations.serialize(\n self.attention_activation\n ),\n \"use_bias\": self.use_bias,\n \"kernel_initializer\": initializers.serialize(self.kernel_initializer),\n \"recurrent_initializer\": initializers.serialize(\n self.recurrent_initializer\n ),\n \"bias_initializer\": initializers.serialize(self.bias_initializer),\n \"attention_initializer\": initializers.serialize(\n self.attention_initializer\n ),\n \"use_chrono_initialization\": self.unit_forget_bias,\n \"kernel_regularizer\": regularizers.serialize(self.kernel_regularizer),\n \"recurrent_regularizer\": regularizers.serialize(\n self.recurrent_regularizer\n ),\n \"bias_regularizer\": regularizers.serialize(self.bias_regularizer),\n \"activity_regularizer\": regularizers.serialize(\n self.activity_regularizer\n ),\n \"attention_regularizer\": regularizers.serialize(\n self.attention_regularizer\n ),\n \"kernel_constraint\": constraints.serialize(self.kernel_constraint),\n \"recurrent_constraint\": constraints.serialize(\n self.recurrent_constraint\n ),\n \"bias_constraint\": constraints.serialize(self.bias_constraint),\n \"attention_constraint\": constraints.serialize(\n self.attention_constraint\n ),\n \"dropout\": self.dropout,\n \"recurrent_dropout\": self.recurrent_dropout,\n \"return_attention\": self.return_attention,\n }\n base_config = super().get_config()\n del base_config[\"cell\"]\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Create a new AttentionLSTM object from a configuration dict.\"\"\"\n if \"implementation\" in config and config[\"implementation\"] == 0:\n config[\"implementation\"] = 1\n return cls(**config)\n\n return AttentionLSTM", "def _compute_attention(attention_mechanism, initial_state, previous_alignments,\n attention_layer):\n alignments, final_state = attention_mechanism(\n initial_state, previous_alignments=previous_alignments)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n expanded_alignments = tf.expand_dims(alignments, 1)\n # Context is the inner product of alignments and values along the\n # memory time dimension.\n # 
alignments shape is\n # [batch_size, 1, memory_time]\n # attention_mechanism.values shape is\n # [batch_size, memory_time, memory_size]\n # the batched matmul is over memory_time, so the output shape is\n # [batch_size, 1, memory_size].\n # we then squeeze out the singleton dim.\n context = tf.matmul(expanded_alignments, attention_mechanism.values)\n context = tf.squeeze(context, [1])\n\n if attention_layer is not None:\n attention = attention_layer(context)\n else:\n attention = context\n\n return attention, alignments, final_state", "def forward(self, state, encoder_padding_mask):\n residual = state.clone()\n\n '''\n ___QUESTION-6-DESCRIBE-D-START___\n What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor \n be after multi-head attention? HINT: formulate your answer in terms of \n constituent variables like batch_size, embed_dim etc...\n '''\n '''\n The encoder padding mask is used to mask the ⟨pad⟩ token which is padded to the input sequences to make the sequences in the same lengths each batch. Thus the word of input sequence will not pay attention to these padded tokens.\n The shape of state is (tgt_time_steps * batch_size * embed_dim)\n '''\n state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)\n '''\n ___QUESTION-6-DESCRIBE-D-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state", "def forward(self, x):\n\n padding_mask = (x == self.tokenizer.vocab['[PAD]'])\n\n x = x.transpose(0, 1).contiguous()\n\n positions = torch.arange(len(x), device=x.device).unsqueeze(-1)\n h = self.tokens_embeddings(x)\n h = h + self.position_embeddings(positions).expand_as(h)\n h = self.dropout(h)\n\n attn_mask = None\n if self.causal:\n attn_mask = torch.full((len(x), len(x)), -float('Inf'), device=h.device, dtype=h.dtype)\n attn_mask = torch.triu(attn_mask, diagonal=1)\n\n for (layer_norm_1, attention, adapter_1, layer_norm_2, feed_forward, adapter_2) \\\n in zip(self.layer_norms_1, self.attentions, self.adapters_1,\n self.layer_norms_2, self.feed_forwards, self.adapters_2):\n h = layer_norm_1(h)\n x, _ = attention(h, h, h, attn_mask=attn_mask, need_weights=False, key_padding_mask=padding_mask)\n x = self.dropout(x)\n\n x = adapter_1(x) + x # Add an adapter with a skip-connection after attention module\n\n h = x + h\n\n h = layer_norm_2(h)\n x = feed_forward(h)\n x = self.dropout(x)\n\n x = adapter_2(x) + x # Add an adapter with a skip-connection after feed-forward module\n\n h = x + h\n return h", "def forward(self, h_prev, x_t):\n from scipy.special import softmax\n # softmax(arr, axis=0)\n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n cat = np.concatenate((h_prev, x_t), axis=1)\n # print('meow', cat.shape)\n h_next = np.tanh(cat @ self.Wh + self.bh)\n y = self.softmax(h_next @ self.Wy + self.by)\n return h_next, y\n\n\n\n\n\n\n\n\n\n\n\n m, i = x_t.shape\n U = self.Wh[:i]\n W = self.Wh[i:]\n x = x_t\n T = len(x_t)\n # During forward propagation we save all hidden states in s because need them later.\n # We add one additional element for the initial hidden, which we set to 0\n s = np.zeros((T + 1, 
len(self.Wh[:self.Wh.shape[1]]) ))\n s[-1] = np.zeros(self.Wh.shape[1])\n # The outputs at each time step. Again, we save them for later.\n o = np.zeros((T, len(self.Wh[:self.Wh.shape[1]])))\n # For each time step...\n for t in np.arange(T):\n # Note that we are indxing U by x[t]. This is the same as multiplying U with a one-hot vector.\n #s[t] = np.tanh(U[:, x_t[]] + W.dot(s[t - 1]))\n o[t] = softmax(self.V.dot(s[t]))\n return s, o\n \n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n print(\"wi\", Wi.shape, \"wh\", Wh.shape)\n print(\"wh\", self.Wh.shape, \"wy\", self.Wy.shape)\n print(\"bh\", self.bh.shape, \"by\", self.by.shape)\n print(\"xtshape\", x_t.shape, \"hprev\", h_prev.shape)\n print(\"one\", self.Wh[:i].shape)\n one = self.Wy.dot(x_t)# np.dot(x_t, Wh) # x_t.dot(self.Wh[:i])\n two = h_prev @ Wh # h_prev.dot(self.Wh[i:])\n sum = one + two\n h_next = np.tanh(sum + self.bh)\n soft = h_next @ self.Wy\n y = self.softmax(soft) # + self.by)\n return h_next, y", "def calc_attention(self, encoder_hidden_states):\n\n params = self.dec_params\n if len(encoder_hidden_states.shape) == 3:\n # Squeeze the first dimension\n encoder_hidden_states = np.squeeze(encoder_hidden_states, axis=0)\n\n # T x Attn_vec_size\n attn_enc_term = np.matmul(encoder_hidden_states, params.attn_enc_w)\n\n def attention(dec_state):\n attn_dec_term = (np.matmul(dec_state, params.attn_dec_w) +\n params.attn_dec_b) # T x A\n attn_sum = np.tanh(attn_enc_term + attn_dec_term) # T x A\n attn_logits = np.squeeze(np.matmul(attn_sum, params.attn_v)) # T\n attn_probs = softmax(attn_logits)\n\n context_vec = np.matmul(attn_probs, encoder_hidden_states)\n # The attention probabilities are necessary for coverage penalty calculation\n return (context_vec, attn_probs)\n\n return attention", "def forward_pass(self, h_tm1, x_t): # Function though to be used by tf.scan\n\n # Convert vector-tensor form into matrix-tensor form\n x_t = tf.reshape(x_t, shape=[1, -1])\n h_tm1 = tf.reshape(h_tm1, shape=[1, -1])\n\n # Definitions of z_t and r_t\n z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)\n r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)\n\n # Definition of h~_t\n h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)\n\n # Compute the next hidden state\n h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)\n\n return tf.squeeze(h_t)", "def task_specific_attention(inputs, output_size, sequence_lengths,\n initializer=layers.xavier_initializer(),\n activation_fn=tf.tanh, scope=None):\n assert len(inputs.get_shape()) == 3 and inputs.get_shape()[-1].value is not None\n\n with tf.variable_scope(scope or 'attention') as scope:\n attention_context_vector = tf.get_variable(name='attention_context_vector',\n shape=[output_size],\n initializer=initializer,\n dtype=tf.float32)\n \n input_projection = layers.fully_connected(inputs, output_size,\n activation_fn=activation_fn,\n scope=scope)\n\n vector_attn = tf.reduce_sum(tf.multiply(input_projection, attention_context_vector), axis=2) \n mask = tf.sequence_mask(sequence_lengths, dtype=tf.float32) \n attention_weights = tf.nn.softmax(vector_attn, axis=1)\n attention_weights = attention_weights*mask\n norms = tf.reduce_sum(attention_weights, axis = 1, keepdims = True) + 1e-6 \n attention_weights = attention_weights / norms\n attention_weights = tf.expand_dims(attention_weights, axis = 2) \n \n weighted_projection = inputs*attention_weights\n outputs = 
tf.reduce_sum(weighted_projection, axis=1)\n\n return outputs", "def forward(self, x, mask):\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n #print('encoder')\n #print(x.shape)\n return self.sublayer[1](x, self.feed_forward)", "def apply_attention(inputs,\n attention_mode=None,\n attention_in=None,\n use_5d_mode=False,\n data_format='channels_last'):\n assert data_format == 'channels_last'\n\n h_ch_loc = 2 if use_5d_mode else 1\n\n if attention_mode == 'peer':\n attn = softmax_merge_peer_attentions(attention_in, data_format)\n else:\n attn = tf.reduce_mean(inputs, [h_ch_loc, h_ch_loc+1])\n attn = tf.layers.dense(\n inputs=attn,\n units=inputs.shape[-1],\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n attn = tf.math.sigmoid(attn)\n channel_attn = tf.expand_dims(tf.expand_dims(attn, h_ch_loc), h_ch_loc)\n\n inputs = tf.multiply(inputs, channel_attn)\n\n return inputs", "def inter_weighted_attention(sentence, # [batch_size, timestep, embed_size]\n other_sentence_vec, # [batch_size, 1, embed_size]\n reuse=None):\n bs, timestep, embed_size1 = sentence.get_shape().as_list()\n bs, ts, embed_size2 = other_sentence_vec.get_shape().as_list()\n assert ts == 1\n embed_size = embed_size1 + embed_size2\n\n with tf.variable_scope('inter_weighted_attention', reuse=reuse):\n inputs = tf.reshape(\n tf.concat([sentence, tf.tile(other_sentence_vec, [1, timestep, 1])], axis=2),\n [-1, embed_size]) # [batch_size * timestep, embed_size1 + embed_size2]\n hidden_size = embed_size\n w1 = tf.get_variable('weight1', [embed_size, hidden_size],\n initializer=tf.contrib.layers.xavier_initializer())\n w2 = tf.get_variable('weight2', [hidden_size, 1],\n initializer=tf.contrib.layers.xavier_initializer())\n hidden_output = tf.tanh(tf.matmul(inputs, w1)) # [batch_size * timestep, hidden_size]\n attention_output = tf.nn.softmax(tf.matmul(hidden_output, w2)) # [batch_size * timestep, 1]\n attention_output = tf.reshape(attention_output, [-1, timestep, 1])\n # attention = tf.squeeze(attention_output, axis=2)\n\n return attention_output", "def forward(self, t):\n x = self.embeddings(t)\n logits = self.model(x.view(x.shape[0], -1))\n return logits", "def forward(self, observation, action):\n\n # Encode the inputs\n observation_embedding = self.observation_embedding(observation.float())\n action_embedding = self.action_embedding(action.float())\n \n observation_action_embedding = torch.cat((observation_embedding, action_embedding),dim=1)\n\n # Attention\n query = self.q_projection(observation_action_embedding).permute(1,0,2)\n key = self.k_projection(observation_action_embedding).permute(1,0,2)\n value = self.v_projection(observation_action_embedding).permute(1,0,2)\n\n x = self.attention(query, key, value)[0].permute(1,0,2)[:,0,:]\n\n x = self.predict(x)\n \n return x", "def attention(self, decoder_state, coverage=None):\n with tf.variable_scope(\"attention_compute\"):\n attn_size = 2*self.hidden_dim\n batch_size = tf.shape(self.encoder_states)[0]\n # Reshape encoder_states (need to insert a dim)\n encoder_states = tf.expand_dims(self.encoder_states, axis=2) # now is shape (batch_size, attn_len, 1, attn_size)\n # To calculate attention, we calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n # where h_i is an encoder state, and s_t a decoder state.\n # attn_vec_size is the length of the vectors v, b_attn, (W_h h_i) and (W_s s_t).\n # We set it to be equal to the size of the encoder states.\n attention_vec_size = attn_size\n\n # Get the weight matrix W_h and apply it to each encoder state to get 
(W_h h_i), the encoder features\n W_h = tf.get_variable(\"W_h\", [1, 1, attn_size, attention_vec_size])\n encoder_features = tf.nn.conv2d(encoder_states, W_h, [1, 1, 1, 1], \"SAME\") # shape (batch_size,attn_length,1,attention_vec_size)\n\n # Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)\n decoder_features = self.linear(decoder_state, attention_vec_size, True) # shape (batch_size, attention_vec_size)\n decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n\n w_c = tf.get_variable(\"w_cvg\", [1, 1, 1, attention_vec_size])\n if self.use_coverage and coverage is not None: # non-first step of coverage\n # Multiply coverage vector by w_c to get coverage_features.\n coverage_features = tf.nn.conv2d(coverage, w_c, [1, 1, 1, 1],\n \"SAME\") # c has shape (batch_size, attn_length, 1, attention_vec_size)\n\n # Calculate v^T tanh(W_h h_i + W_s s_t + w_c c_i^t + b_attn)\n e = tf.reduce_sum(tf.tanh(encoder_features + decoder_features + coverage_features), [2, 3]) # calculate e\n\n # Calculate attention distribution\n attn_dist = tf.nn.softmax(e * self.enc_padding_mask) # masked_attention(e)\n masked_sums = tf.reduce_sum(attn_dist, axis=1) # shape (batch_size)\n attn_dist /= tf.reshape(masked_sums, [-1, 1]) # re-normalize\n # Update coverage vector\n coverage += tf.reshape(attn_dist, [tf.shape(self.encoder_states)[0], -1, 1, 1])\n else:\n # Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n e = tf.reduce_sum(tf.tanh(encoder_features + decoder_features), [2, 3]) # calculate e\n # Calculate attention distribution\n attn_dist = tf.nn.softmax(e * self.enc_padding_mask) # masked_attention(e)\n masked_sums = tf.reduce_sum(attn_dist, axis=1) # shape (batch_size)\n attn_dist /= tf.reshape(masked_sums, [-1, 1]) # re-normalize\n if self.use_coverage: # first step of training\n coverage = tf.expand_dims(tf.expand_dims(attn_dist, 2), 2) # initialize coverage\n\n # Calculate the context vector from attn_dist and encoder_states\n context_vector = tf.reduce_sum(tf.reshape(attn_dist, [tf.shape(self.encoder_states)[0], -1, 1, 1]) * encoder_states, [1, 2]) # shape (batch_size, attn_size).\n context_vector = tf.reshape(context_vector, [-1, attn_size])\n\n return context_vector, attn_dist, coverage", "def forward(self, state, action):\n\n # Prepare the embeddings\n state_embedding = self.state_embedding(state.float())\n state_embedding = state_embedding.repeat(1, action.shape[1], 1)\n action_embedding = self.action_embedding(action.float())\n state_action_embedding = torch.cat((state_embedding, action_embedding),dim=2)\n\n # Attention\n query = self.q_projection(state_action_embedding).permute(1,0,2)\n key = self.k_projection(state_action_embedding).permute(1,0,2)\n value = self.v_projection(state_action_embedding).permute(1,0,2)\n \n x = self.attention(query, key, value)[0].permute(1,0,2)[:,0,:]\n\n # Predict the next state\n x = self.predict(x)\n \n return x", "def forward(self, observation: Tensor) -> Tensor:\n pass", "def attn(self, embed, mask, name=\"\"):\n with tf.variable_scope(\"attn_\"+name):\n K = self.get_trans_param(\"K\")\n Q = self.get_trans_param(\"Q\")\n V = self.get_trans_param(\"V\")\n kdata = tf.einsum(\"nml,lk->nmk\", embed, K)\n qdata = tf.einsum(\"nml,lk->nmk\", embed, Q)\n vdata = tf.einsum(\"nml,lk->nmk\", embed, V) # nbatch x max_atom x n_trans\n kq = tf.einsum(\"nml,nkl->nmk\", qdata, kdata)*(1/math.sqrt(self.n_trans)) # nbatch x max_atom x max_atom\n #mask = tf.expand_dims(mask, 1) # 
nbatch x 1 x max_atom\n mask = tf.keras.backend.repeat(mask, self.max_atom) \n score = tf.where(mask, -9999*tf.ones_like(kq), kq)\n #score = kq\n #score = tf.scatter_update(tf.Variable(kq, validate_shape=False), mask, -9999)# assign a large number\n w = tf.nn.softmax(score, axis=-1) # calculate attention weight, nbatch x max_atom x max_atom\n vout = tf.einsum(\"nml,nlk->nmk\", w, vdata) # nbatch x max_atom x n_trans\n return vout", "def forward(self,\n state,\n encoder_out=None,\n encoder_padding_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n need_attn=False,\n need_head_weights=False):\n\n # need_attn must be True if need_head_weights\n need_attn = True if need_head_weights else need_attn\n print('encoder padding {}, self padding {}'.format(encoder_padding_mask, self_attn_padding_mask.size()))\n residual = state.clone()\n # print('self attention')\n state, _ = self.self_attn(query=state,\n key=state,\n value=state,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n '''\n ___QUESTION-6-DESCRIBE-E-START___\n How does encoder attention differ from self attention? What is the difference between key_padding_mask \n and attn_mask? If you understand this difference, then why don't we need to give attn_mask here?\n '''\n '''\n The encoder attention is making the target input word pay attention to the source sequence from encoder, while the self attention is making the input word pay attention to the words in other positions of the input sequence.\n The key_padding mask masks padded tokens ⟨pad⟩ so the model does not attend to these positions, while the attn mask masks the following tokens at each position to ensure the decoder do not look forward into the sequence.\n In encoder attention, we want the decoder to pay attention to the entire source sequence. 
The attn mask is not needed to mask the subsequent positions because it is not paying attention to itself.\n\n '''\n # print('encoder attention')\n state, attn = self.encoder_attn(query=state,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n need_weights=need_attn or (not self.training and self.need_attn))\n '''\n ___QUESTION-6-DESCRIBE-E-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.encoder_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state, attn", "def call(self, inputs):\n (input_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n with tf.name_scope('attention'):\n attention_output, attention_scores = self.attention_layer(\n from_tensor=input_tensor,\n to_tensor=input_tensor,\n attention_mask=attention_mask)\n with tf.name_scope('output'):\n attention_output = self.attention_output_dense(attention_output)\n attention_output = self.attention_dropout(attention_output)\n # Use float32 in keras layer norm and the gelu activation in the\n # intermediate dense layer for numeric stability\n attention_output = self.attention_layer_norm(input_tensor +\n attention_output)\n if self.float_type == tf.float16:\n attention_output = tf.cast(attention_output, tf.float16)\n\n with tf.name_scope('intermediate'):\n intermediate_output = self.intermediate_dense(attention_output)\n if self.float_type == tf.float16:\n intermediate_output = tf.cast(intermediate_output, tf.float16)\n\n with tf.name_scope('output'):\n layer_output = self.output_dense(intermediate_output)\n layer_output = self.output_dropout(layer_output)\n # Use float32 in keras layer norm for numeric stability\n layer_output = self.output_layer_norm(layer_output + attention_output)\n if self.float_type == tf.float16:\n layer_output = tf.cast(layer_output, tf.float16)\n return layer_output, attention_scores", "def forward(self, output):\n \n hidden_states = self.extract_hidden_states(output)\n \n # Obtaining the attention weights\n weighted_states = self.w1(hidden_states)\n activated_states = self.tanh(weighted_states)\n score_weights = self.w2(activated_states)\n attention_weights = self.softmax(score_weights)\n \n # Applying attention to the matrix with hidden states\n attentional_vector = torch.bmm(torch.transpose(attention_weights,2,1),hidden_states) \n attentional_vector = self.fc(torch.transpose(attentional_vector,2,1)).squeeze(2)\n attentional_vector = self._activation_fn(attentional_vector)\n \n return attentional_vector", "def self_attention(self, hidden):\n mul1 = self.attention1(hidden)\n mul2 = self.attention2(mul1)\n return self.softmax(mul2)", "def attention(decoder_state, coverage=None, num_words_section=None, step=None):\n with variable_scope.variable_scope(\"Attention\"):\n # Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)\n # (W_s s_t) + b_att is decoder_features; s_t = decoder_state\n decoder_features = linear(decoder_state, attention_vec_size, True) # shape (batch_size, attention_vec_size)\n decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n\n def masked_attention(e, enc_padding_mask):\n if enc_section_padding_mask is not 
None:\n enc_padding_mask = tf.reshape(enc_section_padding_mask, [batch_size, -1])\n enc_padding_mask = tf.cast(enc_padding_mask, tf.float32)\n \"\"\"Take softmax of e then apply enc_padding_mask and re-normalize\"\"\"\n attn_dist = nn_ops.softmax(e) # take softmax. shape (batch_size, attn_length)\n attn_dist *= enc_padding_mask # apply mask\n masked_sums = tf.reduce_sum(attn_dist, axis=1) # shape (batch_size)\n return attn_dist / tf.reshape(masked_sums, [-1, 1]) # re-normalize\n\n if use_coverage and coverage is not None: # non-first step of coverage\n if not hier:\n # Multiply coverage vector by w_c to get coverage_features.\n coverage_features = nn_ops.conv2d(coverage, w_c, [1, 1, 1, 1], \"SAME\") # c has shape (batch_size, seq_len, 1, attention_vec_size)\n \n # Calculate v^T tanh(W_h h_i + W_s s_t + w_c c_i^t + b_attn)\n e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features + coverage_features), [2, 3]) # shape (batch_size,seq_len)\n \n # Take softmax of e to get the attention distribution\n # attn_dist = nn_ops.softmax(e) # shape (batch_size, seq_len)\n attn_dist = masked_attention(e, enc_padding_mask)\n \n # Update coverage vector\n coverage += array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) # shape=(batch_size, seq_len,1,1)\n else:\n with tf.variable_scope(\"attention_sections\"):\n if FLAGS.fixed_attn:\n tf.logging.debug('running with fixed attn', '\\r')\n decoder_features_sec = linear(decoder_state, attention_vec_size, True, scope='Linear--Section-Features') # shape (batch_size, attention_vec_size)\n decoder_features_sec = tf.expand_dims(tf.expand_dims(decoder_features_sec, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n e_sec = math_ops.reduce_sum(v_sec * math_ops.tanh(encoder_section_features + decoder_features_sec), [2, 3]) # [batch_size x seq_len_sections]\n attn_dist_sec = nn_ops.softmax(e_sec)\n else:\n e_sec = math_ops.reduce_sum(v_sec * math_ops.tanh(encoder_section_features + decoder_features), [2, 3]) # [batch_size x seq_len_sections]\n attn_dist_sec = nn_ops.softmax(e_sec)\n with tf.variable_scope(\"attention_words\"):\n coverage_features = nn_ops.conv2d(coverage, w_c, [1, 1, 1, 1], \"SAME\") # c has shape (batch_size, seq_len, 1, attention_vec_size)\n \n # Calculate v^T tanh(W_h h_i + W_s s_t + w_c c_i^t + b_attn)\n e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features + coverage_features), [2, 3]) # shape (batch_size,seq_len)\n\n # Multiply by section weights\n \n e = tf.reshape(e, [batch_size, -1, num_words_section[0][0]])\n e = tf.multiply(e, attn_dist_sec[:,:,tf.newaxis])\n e = tf.reshape(e, [batch_size,-1])\n\n\n# --- Some hack for reweighting attention (similar to temp for softmax)\n if temperature > 0.0:\n e = e * temperature\n \n attn_dist = masked_attention(e, enc_padding_mask)\n coverage += array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) # shape=(batch_size, seq_len,1,1)\n \n else:\n # Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n if hier:\n with tf.variable_scope(\"attention_sections\"):\n if FLAGS.fixed_attn:\n decoder_features_sec = linear(decoder_state, attention_vec_size, True, scope='Linear--Section-Features') # shape (batch_size, attention_vec_size)\n decoder_features_sec = tf.expand_dims(tf.expand_dims(decoder_features_sec, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n e_sec = math_ops.reduce_sum(\n v_sec * math_ops.tanh(encoder_section_features + decoder_features_sec), [2, 3]) # [batch_size x seq_len_sections]\n attn_dist_sec = nn_ops.softmax(e_sec)\n else:\n 
e_sec = math_ops.reduce_sum(\n v_sec * math_ops.tanh(encoder_section_features + decoder_features), [2, 3]) # [batch_size x seq_len_sections]\n attn_dist_sec = nn_ops.softmax(e_sec)\n\n with tf.variable_scope(\"attention_words\"):\n\n e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features), [2, 3]) #[batch_size x seq_len]\n\n e = tf.reshape(e, [batch_size, -1, num_words_section[0][0]])\n e = tf.multiply(e, attn_dist_sec[:,:,tf.newaxis])\n e = tf.reshape(e, [batch_size,-1])\n\n if temperature > 0.0:\n e = e * temperature\n \n attn_dist = masked_attention(e, enc_padding_mask)\n \n else:\n e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features), [2, 3]) # calculate e\n # Take softmax of e to get the attention distribution\n if enc_padding_mask is not None:\n attn_dist = masked_attention(e, enc_padding_mask)\n else:\n attn_dist = nn_ops.softmax(e) # shape (batch_size, seq_len)\n\n if use_coverage: # first step of training\n coverage = tf.expand_dims(tf.expand_dims(attn_dist,2),2) # initialize coverage\n\n # TODO: coverage for hier\n\n # Calculate the context vector from attn_dist and encoder_states\n # ecnoder_sates = [batch , seq_len , 1 , encoder_output_size], attn_dist = [batch, seq_len, 1, 1]\n context_vector = math_ops.reduce_sum(array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) * encoder_states, [1, 2]) # shape (batch_size, enc_output_size).\n context_vector = array_ops.reshape(context_vector, [-1, enc_output_size])\n\n if hier:\n return context_vector, attn_dist, coverage, attn_dist_sec\n else:\n return context_vector, attn_dist, coverage", "def Anatomical_attention_gate(featureMap1,featureMap2):\n ndims = len(featureMap1.get_shape()) - 2\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. 
found: %d\" % ndims\n# input_channels = featureMap1.get_shape().as_list()[-1]\n# batch_size1 = tf.shape(down_in)[0]\n# nf = tf.min(batch_size0,batch_size1)\n featureMap = concatenate([featureMap1, featureMap2])\n Conv = getattr(KL, 'Conv%dD' % ndims)\n tensorweight1 = Conv(1, kernel_size=1, padding='same',\n kernel_initializer='he_normal', use_bias = True, bias_initializer='zeros',strides=1,activation='sigmoid')(featureMap)\n# tensorweight1 = Activation('relu')(tensorweight1)\n w_featureMap1 = Multiply()([featureMap1,tensorweight1])\n tensorweight2 = Conv(1, kernel_size=1, padding='same',\n kernel_initializer='he_normal', use_bias = True, bias_initializer='zeros',strides=1,activation='sigmoid')(featureMap)\n# tensorweight2 = Activation('relu')(tensorweight2)\n w_featureMap2 = Multiply()([featureMap2,tensorweight2])\n w_featureMap = Add()([w_featureMap1,w_featureMap2])\n return w_featureMap", "def forward(self, x):\n\n padding_mask = (x == self.tokenizer.vocab['[PAD]'])\n\n x = x.transpose(0, 1).contiguous()\n\n positions = torch.arange(len(x), device=x.device).unsqueeze(-1)\n h = self.tokens_embeddings(x)\n h = h + self.position_embeddings(positions).expand_as(h)\n h = self.dropout(h)\n\n attn_mask = None\n if self.causal:\n attn_mask = torch.full((len(x), len(x)), -float('Inf'), device=h.device, dtype=h.dtype)\n attn_mask = torch.triu(attn_mask, diagonal=1)\n\n for layer_norm_1, attention, layer_norm_2, feed_forward in zip(self.layer_norms_1, self.attentions,\n self.layer_norms_2, self.feed_forwards):\n h = layer_norm_1(h)\n x, _ = attention(h, h, h, attn_mask=attn_mask, need_weights=False, key_padding_mask=padding_mask)\n x = self.dropout(x)\n h = x + h\n\n h = layer_norm_2(h)\n x = feed_forward(h)\n x = self.dropout(x)\n h = x + h\n return h", "def forward(self, aligned_feat):\n n, t, c, h, w = aligned_feat.size()\n # temporal attention\n embedding_ref = self.temporal_attn1(\n aligned_feat[:, self.center_frame_idx, :, :, :].clone())\n emb = self.temporal_attn2(aligned_feat.view(-1, c, h, w))\n emb = emb.view(n, t, -1, h, w) # (n, t, c, h, w)\n\n corr_l = [] # correlation list\n for i in range(t):\n emb_neighbor = emb[:, i, :, :, :]\n corr = torch.sum(emb_neighbor * embedding_ref, 1) # (n, h, w)\n corr_l.append(corr.unsqueeze(1)) # (n, 1, h, w)\n corr_prob = torch.sigmoid(torch.cat(corr_l, dim=1)) # (n, t, h, w)\n corr_prob = corr_prob.unsqueeze(2).expand(n, t, c, h, w)\n corr_prob = corr_prob.contiguous().view(n, -1, h, w) # (n, t*c, h, w)\n aligned_feat = aligned_feat.view(n, -1, h, w) * corr_prob\n\n # fusion\n feat = self.feat_fusion(aligned_feat)\n\n # spatial attention\n attn = self.spatial_attn1(aligned_feat)\n attn_max = self.max_pool(attn)\n attn_avg = self.avg_pool(attn)\n attn = self.spatial_attn2(torch.cat([attn_max, attn_avg], dim=1))\n # pyramid levels\n attn_level = self.spatial_attn_l1(attn)\n attn_max = self.max_pool(attn_level)\n attn_avg = self.avg_pool(attn_level)\n attn_level = self.spatial_attn_l2(\n torch.cat([attn_max, attn_avg], dim=1))\n attn_level = self.spatial_attn_l3(attn_level)\n attn_level = self.upsample(attn_level)\n\n attn = self.spatial_attn3(attn) + attn_level\n attn = self.spatial_attn4(attn)\n attn = self.upsample(attn)\n attn = self.spatial_attn5(attn)\n attn_add = self.spatial_attn_add2(self.spatial_attn_add1(attn))\n attn = torch.sigmoid(attn)\n\n # after initialization, * 2 makes (attn * 2) to be close to 1.\n feat = feat * attn * 2 + attn_add\n return feat", "def forward(self,\n state,\n encoder_out=None,\n encoder_padding_mask=None,\n 
incremental_state=None,\n prev_self_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n need_attn=False,\n need_head_weights=False):\n\n # need_attn must be True if need_head_weights\n need_attn = True if need_head_weights else need_attn\n\n residual = state.clone()\n state, _ = self.self_attn(query=state,\n key=state,\n value=state,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n '''\n ___QUESTION-6-DESCRIBE-E-START___\n How does encoder attention differ from self attention? What is the difference between key_padding_mask\n and attn_mask? If you understand this difference, then why don't we need to give attn_mask here?\n\n Encoder attention differs from self-attention in that it attends to the\n output embeddings of the encoder instead of the embeddings in the decoder.\n key_padding_mask is used to adjust the length of the sentences, whereas\n attn_mask prevents the decoder from attending to future positions.\n We do not use attn_mask while attending to the decoder since we want all\n the embeddings in the decoder to have access to all the encoder output\n embeddings.\n '''\n state, attn = self.encoder_attn(query=state,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n need_weights=need_attn or (not self.training and self.need_attn))\n '''\n ___QUESTION-6-DESCRIBE-E-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.encoder_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state, attn", "def forward(self,\n x: Tensor = None,\n memory: Tensor = None,\n src_mask: Tensor = None,\n trg_mask: Tensor = None) -> Tensor:\n # decoder/target self-attention\n\n x_norm = self.x_layer_norm(x)\n# print(x_norm)\n# trg_mask()\n# print(trg_mask)\n\n h1 = self.trg_trg_att(x_norm, x_norm, x_norm, mask=trg_mask)\n h1 = self.dropout(h1) + x\n\n\n if self.src_trg_att:\n # source-target attention\n h1_norm = self.dec_layer_norm(h1)\n h2 = self.src_trg_att(memory, memory, h1_norm, mask=src_mask)\n # final position-wise feed-forward layer\n o = self.feed_forward(self.dropout(h2) + h1)\n else:\n o = self.feed_forward(h1)\n return o", "def _compute_attention(attention_mechanism, batch_size, cell_output, previous_alignments,\n attention_layer):\n line_alignments, word_alignments, hier_alignments = attention_mechanism(\n cell_output, batch_size, previous_alignments=previous_alignments)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n # Context is the inner product of alignments and values along the\n # memory time dimension.\n # alignments shape is\n # [batch_size, 1, memory_time]\n # attention_mechanism.values shape is\n # [batch_size, memory_time, memory_size]\n # the batched matmul is over memory_time, so the output shape is\n # [batch_size, 1, memory_size].\n # we then squeeze out the singleton dim.\n expanded_line_alignments = array_ops.expand_dims(line_alignments, 1)\n line_context = math_ops.matmul(expanded_line_alignments, attention_mechanism.values)\n line_attention = 
array_ops.squeeze(line_context, [1])\n\n return line_attention, line_alignments, word_alignments, hier_alignments", "def temporal_activation_influence(trial_id, influencer, influenced):\n influencer_id, influenced_id = var(\"_influencer_id _influenced_id\")\n return (\n _name(trial_id, \"activation\", influencer_id, influencer) &\n _name(trial_id, \"activation\", influenced_id, influenced) &\n temporal_activation_influence_id(trial_id, influencer_id, influenced_id)\n )", "def compute_attention(self, decoder_state, forward_encoder_states, backward_encoder_states):\r\n\r\n\t\tassert len(forward_encoder_states) == len(backward_encoder_states)\r\n\t\tmax_encoding_len = len(forward_encoder_states)\r\n\t\toutput_states=[]\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\tstate = np.concatenate([forward_encoder_states[i]['h'], backward_encoder_states[i]['h']])\r\n\t\t\toutput_states.append(state)\r\n\r\n\t\ttiled_outputs = np.concatenate(output_states, 1)\r\n\t\ttiled_decoder = np.tile(decoder_state['h'], (1, max_encoding_len))\r\n\r\n\t\talpha = np.zeros((len(output_states), 1))\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\talpha[i] = np.matmul(self.attentionV.transpose(), np.tanh(np.matmul(self.memoryLayer, output_states[i]) + np.matmul(self.queryLayer, decoder_state['h'])))\r\n\t\talpha = softmax(alpha)\r\n\r\n\t\tu = np.zeros(output_states[0].shape)\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\tu += alpha[i, 0] * output_states[i]\r\n\t\ta = np.matmul(self.attentonLayer, np.concatenate([decoder_state['h'], u]))\r\n\r\n\t\treturn a, alpha", "def forward(self, x, src_states, src_mask, tgt_mask):\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n #print('decoder')\n #print(x.shape)\n x = self.sublayer[1](x, lambda x: self.src_attn(x, src_states, src_states, src_mask))\n #print(x.shape)\n return self.sublayer[2](x, self.feed_forward)", "def forward(self, input):\n y = self.tcn(input.transpose(1, 2)).transpose(1, 2)\n y = self.decoder(y[:,-1,:])\n y = self.sigmoid(y)\n return y", "def _forward_alg(self, feats):\n\n init_alphas = torch.full((self.batch_size, 1, self.tagset_size), -10000.).to(self.device)\n # START_TAG has all of the score.\n init_alphas[:, 0, self.tag2idx[START_TAG]] = 0.\n forward_var = init_alphas\n\n # Iterate through the sentence\n for i in range(feats.shape[1]):\n feat = feats[:,i,:]\n\n emit_score = feat.view(self.batch_size, self.tagset_size, 1)\n next_tag_var = forward_var + self.transitions + emit_score\n forward_var = torch.logsumexp(next_tag_var,dim=-1).view(self.batch_size, 1, self.tagset_size)\n\n terminal_var = forward_var + self.transitions[self.tag2idx[STOP_TAG]]\n alpha = torch.logsumexp(terminal_var,dim=-1)\n return alpha", "def self_attention_model(self):\n inputs = self.prepare_inputs()\n \n features = self.prepare_features(inputs)\n support_history_context_concat_emb = features['support_history_context_concat_emb']\n support_link_info_concat_emb = features['support_link_info_concat_emb']\n support_start_node_info_concat_emb = features['support_start_node_info_concat_emb']\n support_end_node_info_concat_emb = features['support_end_node_info_concat_emb']\n support_future_context_concat_emb = features['support_future_context_concat_emb']\n support_neighbor_link_info_concat_emb = features['support_neighbor_link_info_concat_emb']\n support_neighbor_start_node_info_concat_emb = features['support_neighbor_start_node_info_concat_emb']\n support_neighbor_end_node_info_concat_emb = features['support_neighbor_end_node_info_concat_emb']\n 
support_neighbor_link_state_concat_emb = features['support_neighbor_link_state_concat_emb']\n support_y_embed = features['support_duration_delta_concat_emb']\n\n query_history_context_concat_emb = features['query_history_context_concat_emb']\n query_link_info_concat_emb = features['query_link_info_concat_emb']\n query_start_node_info_concat_emb = features['query_start_node_info_concat_emb']\n query_end_node_info_concat_emb = features['query_end_node_info_concat_emb']\n query_future_context_concat_emb = features['query_future_context_concat_emb']\n query_neighbor_link_info_concat_emb = features['query_neighbor_link_info_concat_emb']\n query_neighbor_start_node_info_concat_emb = features['query_neighbor_start_node_info_concat_emb']\n query_neighbor_end_node_info_concat_emb = features['query_neighbor_end_node_info_concat_emb']\n query_neighbor_link_state_concat_emb = features['query_neighbor_link_state_concat_emb']\n \n support_x = self.constgat({\n 'history_context': support_history_context_concat_emb,\n 'link_info': support_link_info_concat_emb,\n 'start_node_info': support_start_node_info_concat_emb,\n 'end_node_info': support_end_node_info_concat_emb,\n 'future_context': support_future_context_concat_emb,\n 'neighbor_link_info': support_neighbor_link_info_concat_emb,\n 'neighbor_start_node_info': support_neighbor_start_node_info_concat_emb,\n 'neighbor_end_node_info': support_neighbor_end_node_info_concat_emb,\n 'neighbor_link_state': support_neighbor_link_state_concat_emb})\n\n query_x = self.constgat({\n 'history_context': query_history_context_concat_emb,\n 'link_info': query_link_info_concat_emb,\n 'start_node_info': query_start_node_info_concat_emb,\n 'end_node_info': query_end_node_info_concat_emb,\n 'future_context': query_future_context_concat_emb,\n 'neighbor_link_info': query_neighbor_link_info_concat_emb,\n 'neighbor_start_node_info': query_neighbor_start_node_info_concat_emb,\n 'neighbor_end_node_info': query_neighbor_end_node_info_concat_emb,\n 'neighbor_link_state': query_neighbor_link_state_concat_emb})\n query_x = query_x + layers.reduce_sum(inputs['query_mask']) * 0.0 + layers.reduce_sum(inputs['support_mask']) * 0.0 # IMPORTANT: for save_inference_model\n\n def forward_attention(indicator, support_x, support_y_embed, support_mask, query_x, query_y, query_mask):\n \"\"\"\n support_indicator: length = support_len\n if attention(support, query), indicator = 0\n if attention(support, support), indicator = 1\n \"\"\"\n support_y_embed = support_y_embed * support_mask\n support_xy = layers.concat([support_x, support_y_embed, indicator], axis=1)\n\n pad_value = layers.assign(input=numpy.array([0.0], dtype=numpy.float32))\n support_pad, support_len = layers.sequence_pad(support_xy, pad_value=pad_value)\n query_pad, query_len = layers.sequence_pad(query_x, pad_value=pad_value)\n\n attention = self.attention(query_pad, support_pad, support_pad, self.hidden_dim, 'meta')\n attention = layers.sequence_unpad(attention, length=query_len)\n pred_input = layers.concat([attention, query_x], axis=1)\n\n pred = self.prepare_preds_with_name(pred_input, 'out_pred')\n label = layers.cast(query_y, dtype='float32')\n label = layers.scale(label, scale=0.01)\n\n loss = layers.huber_loss(pred, label, 1.0) * query_mask\n loss = layers.mean(loss)\n return pred, label, loss\n\n indicator = support_y_embed * 0.0\n pred, label, loss1 = forward_attention(\n indicator, support_x, support_y_embed, inputs['support_mask'], \n query_x, inputs['query_duration_delta'], 1.0)\n indicator = support_y_embed * 1.0\n 
_, _, loss2 = forward_attention(\n indicator, support_x, support_y_embed, inputs['support_mask'], \n support_x, inputs['support_duration_delta'] - 120, (inputs['support_mask'] * (-1.0) + 1))\n loss = loss1 + loss2\n return pred, label, loss", "def forward(self, X, time_delay):\n\n\t\t# <----------- Getting the dimensions ----------->\n\t\tbatch_size, num_posts, num_words, emb_dim = X.shape\n\n\t\t# <----------- Reshaping X (Combining batch and posts) ----------->\n\t\tX = X.view(-1, num_words, emb_dim)\n\t\t\n\t\t# <----------- Do max pooling for X ----------->\n\t\tX = X.permute(0, 2, 1).contiguous()\n\t\tX = F.adaptive_max_pool1d(X, 1).squeeze(-1)\n\n\t\t# <----------- Rehshaping X (Shaping them back into batches) ----------->\n\t\tX = X.view(batch_size, num_tweets, -1)\n\n\t\t# <----------- Setting the query, key and val ----------->\n\t\tquery = X \n\t\tkey = X\n\t\tval = X\n\n\t\t# <----------- Passing the query, key and val with n number of feedforward layers ----------->\n\t\tfor i in range(self.config.num_emb_layers):\n\t\t\tquery = self.layer_norm(self.emb_layer_query[i](query))\n\t\t\tkey = self.layer_norm(self.emb_layer_key[i](key))\n\t\t\tval = self.layer_norm(self.emb_layer_val[i](val))\n\n\t\t# <----------- Adding in time delay information ----------->\n\t\tquery = query + time_delay\n\t\tkey = key + time_delay\n\t\tval = val + time_delay\n\n\t\t# <----------- Passing through post level transformer (Not keeping the attention values for now) ----------->\n\t\tself_atten_output, self_atten_weights_dict = self.transformer_post(query, key, val)\n\n\t\t# Getting the average embedding for the self attended output \n\t\tself_atten_output = self_atten_output.permute(0, 2, 1).contiguous()\n\t\tself_atten_output = F.adaptive_max_pool1d(self_atten_output, 1).squeeze(-1)\n\n\t\t# <------- Passing through a feed foward layer then do a softmax to do prediction ------->\n\t\toutput = self.final_layer(self_atten_output)\n\n\t\treturn output, self_atten_weights_dict", "def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False,\n conv_kernel_size=3,\n head_ratio=2,\n conv_type=\"noconv\",\n **kargs):\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size / num_attention_heads)\n input_shape = bert_utils.get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on\n # the GPU/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = bert_utils.reshape_to_matrix(input_tensor)\n\n attn_maps = []\n all_layer_outputs = []\n all_value_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head, probs, value_layer = attention_layer(\n from_tensor=prev_output,\n to_tensor=prev_output,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length,\n conv_kernel_size=conv_kernel_size,\n head_ratio=head_ratio,\n conv_type=conv_type,\n from_tensor_mask=kargs.get('from_tensor_mask', None),\n to_tensor_mask=kargs.get('to_tensor_mask', None),\n conv_method=kargs.get('conv_method', \"dot\"))\n attention_heads.append(attention_head)\n attn_maps.append(probs)\n all_value_outputs.append(value_layer)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n with tf.variable_scope(\"output\"):\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n \n\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + prev_output)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n prev_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n\n prev_output = dropout(prev_output, hidden_dropout_prob)\n prev_output = layer_norm(prev_output + attention_output)\n all_layer_outputs.append(prev_output)\n\n attn_maps = tf.stack(attn_maps, 0)\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = bert_utils.reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs, attn_maps, all_value_outputs\n else:\n final_output = bert_utils.reshape_from_matrix(prev_output, input_shape)\n return final_output, attn_maps, all_value_outputs", "def feed_forward(self):\n self.hidden_activation = self._sigmoid(np.dot(self.input_activation, self.w1))\n self.output_activation = self._sigmoid(np.dot(self.hidden_activation, self.w2))", "def attention_weight(x, fixed_weights_attention, biais_attention, step_dim):\n \"\"\" fixed_weights_attention (array) : Fixed weight of the learned attention layer\n biais_attention (array) : bias of the learned attention layer\n step_dim (int) : maxlen \"\"\"\n \"\"\" return : weights (array)\"\"\"\n\n features_dim = 
fixed_weights_attention.shape[0]\n\n eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),\n K.reshape(fixed_weights_attention, (features_dim, 1))), (-1, step_dim))\n\n eij += biais_attention\n\n eij = K.tanh(eij)\n\n a = K.exp(eij)\n\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n weights = K.expand_dims(a)\n # weighted_input = x * a\n return weights", "def forward(self, text, prev_mel):\n # forward pass through text embedding and get k and v\n kv = self.t_encoder(text)\n k = kv[:,:self.hp.d,:]\n v = kv[:,self.hp.d:,:]\n # forward pass through audio encoding and get Q\n q = self.a_encoder(prev_mel)\n \n # compute attention\n a = (k.transpose(2,1)).matmul(q)/np.sqrt(self.hp.d)\n a = F.softmax(a, dim=1)\n r = v.matmul(a)\n \n # create R' and forward pass through decoder\n # note that the decoder does not have sigmoid transform at the end, so we are actually getting \n # ylogit\n rprime = torch.cat((r, q), dim=1)\n ylogit = self.decoder(rprime)\n y = F.sigmoid(ylogit)\n return y, ylogit, a", "def forward(self, hidden, attn, verbose=False):\n # Original probabilities.\n logits = self.linear(hidden)\n logits[:, onmt.Constants.UNK] = -float('inf')\n logits[:, onmt.Constants.PAD] = -float('inf')\n prob = F.softmax(logits)\n\n # Probability of copying p(z=1) batch\n copy = F.sigmoid(self.linear_copy(hidden))\n\n # Probibility of not copying: p_{word}(w) * (1 - p(z))\n out_prob = torch.mul(prob, 1 - copy.expand_as(prob))\n mul_attn = torch.mul(attn, copy.expand_as(attn))\n return out_prob, mul_attn", "def call(self, inputs):\n (from_tensor, to_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n # `query_tensor` = [B, F, N ,H]\n query_tensor = self.query_dense(from_tensor)\n\n # `key_tensor` = [B, T, N, H]\n key_tensor = self.key_dense(to_tensor)\n\n # `value_tensor` = [B, T, N, H]\n value_tensor = self.value_dense(to_tensor)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum(\"BTNH,BFNH->BNFT\", key_tensor, query_tensor)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(self.size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.attention_probs_dropout(attention_probs)\n\n # `context_layer` = [B, F, N, H]\n context_tensor = tf.einsum(\"BNFT,BTNH->BFNH\", attention_probs, value_tensor)\n\n return context_tensor, attention_scores", "def forward_att(self, eouts, elens, ys, trigger_points=None):\n 
losses_auxiliary = {}\n ys_in, ys_out, ylens = append_sos_eos(ys, self.eos, self.eos, self.pad, self.device, self.bwd)\n if not self.training:\n self.data_dict['elens'] = tensor2np(elens)\n self.data_dict['ylens'] = tensor2np(ylens)\n self.data_dict['ys'] = tensor2np(ys_out)\n bs, ymax = ys_in.size()[:2]\n tgt_mask = (ys_out != self.pad).unsqueeze(1).repeat([1, ymax, 1])\n causal_mask = tgt_mask.new_ones(ymax, ymax, dtype=tgt_mask.dtype)\n causal_mask = torch.tril(causal_mask).unsqueeze(0)\n tgt_mask = tgt_mask & causal_mask\n src_mask = make_pad_mask(elens).unsqueeze(1).repeat([1, ymax, 1])\n if self.attn_type == 'mocha':\n attn_mask = (ys_out != self.pad).unsqueeze(1).unsqueeze(3)\n else:\n attn_mask = None\n lmout = None\n if self.lm is not None:\n self.lm.eval()\n with torch.no_grad():\n lmout, lmstate, _ = self.lm.predict(ys_in, None)\n lmout = self.lm_output_proj(lmout)\n out = self.pos_enc(self.embed_token_id(ys_in), scale=True)\n xy_aws_layers = []\n xy_aws = None\n for lth, layer in enumerate(self.layers):\n out = layer(out, tgt_mask, eouts, src_mask, mode='parallel', lmout=lmout)\n xy_aws = layer.xy_aws\n if xy_aws is not None and self.attn_type == 'mocha':\n xy_aws_masked = xy_aws.masked_fill_(attn_mask.expand_as(xy_aws) == 0, 0)\n xy_aws_layers.append(xy_aws_masked.clone())\n if not self.training:\n self.aws_dict['yy_aws_layer%d' % lth] = tensor2np(layer.yy_aws)\n self.aws_dict['xy_aws_layer%d' % lth] = tensor2np(layer.xy_aws)\n self.aws_dict['xy_aws_beta_layer%d' % lth] = tensor2np(layer.xy_aws_beta)\n self.aws_dict['xy_aws_p_choose%d' % lth] = tensor2np(layer.xy_aws_p_choose)\n self.aws_dict['yy_aws_lm_layer%d' % lth] = tensor2np(layer.yy_aws_lm)\n logits = self.output(self.norm_out(out))\n loss, ppl = cross_entropy_lsm(logits, ys_out, self.lsm_prob, self.pad, self.training)\n losses_auxiliary['loss_quantity'] = 0.0\n if self.attn_type == 'mocha':\n n_tokens_ref = tgt_mask[:, -1, :].sum(1).float()\n n_tokens_pred = sum([torch.abs(aws.sum(3).sum(2).sum(1) / aws.size(1)) for aws in xy_aws_layers])\n n_tokens_pred /= len(xy_aws_layers)\n losses_auxiliary['loss_quantity'] = torch.mean(torch.abs(n_tokens_pred - n_tokens_ref))\n acc = compute_accuracy(logits, ys_out, self.pad)\n return loss, acc, ppl, losses_auxiliary", "def _prepare_attended_output(self,\n decoder_hidden_state: torch.Tensor,\n state: Dict[str, torch.Tensor]) -> torch.Tensor:\n # Ensure mask is also a FloatTensor. 
Or else the multiplication within\n # attention will complain.\n # shape: (batch_size, max_input_sequence_length)\n\n encoder_outputs = state[\"encoder_outputs\"]\n source_mask = state[\"source_mask\"]\n prev_attention = state[\"attention\"]\n att_keys = state[\"att_keys\"]\n att_values = state[\"att_values\"]\n\n # shape: (batch_size, max_input_sequence_length)\n mode = \"soft\" if self.training else \"hard\"\n if isinstance(self._attention, MonotonicAttention):\n encoder_outs: Dict[str, torch.Tensor] = {\n \"value\": state[\"encoder_outputs\"],\n \"mask\": state[\"source_mask\"]\n }\n\n monotonic_attention, chunk_attention = self._attention(\n encoder_outs, decoder_hidden_state, prev_attention, mode=mode)\n # shape: (batch_size, encoder_output_dim)\n attended_output = util.weighted_sum(\n encoder_outputs, chunk_attention)\n attention = monotonic_attention\n elif isinstance(self._attention, StatefulAttention):\n attended_output, attention = self._attention(decoder_hidden_state,\n att_keys, att_values, source_mask)\n else:\n attention = self._attention(\n decoder_hidden_state, source_mask)\n attended_output = util.weighted_sum(\n encoder_outputs, attention)\n\n return attended_output, attention", "def forward(self, input):\n if self.dataset_name == 'mnist':\n if self.temp_attn:\n y, attn_weight_list = self.tcanet(input)\n o = self.decoder(y[:, :, -1])\n return F.log_softmax(o, dim=1).contiguous()\n else:\n y = self.tcanet(input)\n o = self.decoder(y[:, :, -1])\n return F.log_softmax(o, dim=1).contiguous()\n emb = self.drop(self.word_encoder(input))\n if self.temp_attn:\n y, attn_weight_list = self.tcanet(emb.transpose(1, 2))\n y = self.decoder(y.transpose(1, 2))\n return y.contiguous(), [attn_weight_list[0], attn_weight_list[self.num_levels // 2], attn_weight_list[-1]]\n else:\n y = self.tcanet(emb.transpose(1, 2))\n y = self.decoder(y.transpose(1, 2))\n return y.contiguous()", "def forward(X,params,name='',activation=sigmoid):\n pre_act, post_act = None, None\n # get the layer parameters\n W = params['W' + name]\n b = params['b' + name]\n\n # your code here\n pre_act = X @ W + b\n post_act = activation(pre_act)\n\n # store the pre-activation and post-activation values\n # these will be important in backprop\n params['cache_' + name] = (X, pre_act, post_act)\n\n return post_act", "def forward(self, inputs):\n x_wrd = self.lookup(inputs)\n\n # bilinear attention\n x_avg = x_wrd.mean(dim=1)\n x = x_wrd.matmul(self.M)\n x = x.matmul(x_avg.unsqueeze(1).transpose(1, 2))\n if self.b is not None:\n x += self.b\n\n x = F.tanh(x) \n a = F.softmax(x, dim=1)\n\n z = a.transpose(1, 2).matmul(x_wrd)\n z = z.squeeze()\n if z.dim() == 1:\n return z.unsqueeze(0)\n return z", "def backpropagate(eta, momentum):\n for i_lay in range(len(layers)-1, 0, -1):\n lay = layers[i_lay]\n if i_lay == len(layers)-1:\n lay[\"delta\"] = lay[\"error\"] * dlogistic(lay[\"v\"])\n else:\n lay[\"delta\"] = (layers[i_lay+1][\"weigths\"][:, 1:].T @ layers[i_lay+1]\n [\"delta\"]) * dlogistic(lay[\"v\"])\n lay[\"Delta_w\"] = eta * lay[\"delta\"] @ layers[i_lay - 1][\"y\"].T +\\\n momentum * lay[\"Delta_w\"]", "def act(self, x: np.ndarray, t: int = None, noise: np.ndarray = None) -> np.ndarray:", "def forward(self, y, h):\n y = y.transpose(1, 0)\n\n t = self.linear_in(h)\n target = self.linear_in(h).unsqueeze(2) # batch x dim x 1\n\n # Get attention\n attn = torch.bmm(y, target).squeeze(2) # batch x T\n attn = F.softmax(attn, dim=1)\n attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x T\n\n weighted_y = 
torch.bmm(attn3, y).squeeze(1) # batch x dim\n h_tilde = torch.cat((weighted_y, h), 1)\n\n h_tilde = torch.tanh(self.linear_out(h_tilde))\n\n return h_tilde, attn", "def forward(self, inputs):\n location_bias, context_bias, pos_emb, q, k, v, mask = inputs\n\n d_feature = q.shape[-1]\n n_heads = self._n_heads\n if d_feature % n_heads != 0:\n raise ValueError(\n f'Dimensionality of feature embedding ({d_feature}) is not a '\n f'multiple of the requested number of attention heads ({n_heads}).')\n\n if self._mode == 'predict':\n self._fast_inference_update_state((k, v), self.state)\n (k, v, _) = self.state\n\n per_head_results, dots = DotProductAttention(\n SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(q),\n SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(k),\n SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(v),\n pos_emb.reshape((-1, n_heads, d_feature // n_heads)),\n context_bias,\n location_bias,\n mask,\n dropout=self._dropout,\n mode=self._mode,\n rng=self.rng,\n chunk_len=self._chunk_len,\n chunk_offset=self._chunk_offset)\n if self._mode == 'viz':\n self.state = dots\n merged_results = MergeHeads(\n n_heads, merged_batch_and_head=False).forward(per_head_results)\n return merged_results, mask", "def forward(Observation, Emission, Transition, Initial):\n # Hidden States\n N = Transition.shape[0]\n\n # Observations\n T = Observation.shape[0]\n\n # F == alpha\n # initialization α1(j) = πjbj(o1) 1 ≤ j ≤ N\n F = np.zeros((N, T))\n F[:, 0] = Initial.T * Emission[:, Observation[0]]\n\n # formula shorturl.at/amtJT\n # Recursion αt(j) == ∑Ni=1 αt−1(i)ai jbj(ot); 1≤j≤N,1<t≤T\n for t in range(1, T):\n for n in range(N):\n Transitions = Transition[:, n]\n Emissions = Emission[n, Observation[t]]\n F[n, t] = np.sum(Transitions * F[:, t - 1]\n * Emissions)\n\n # Termination P(O|λ) == ∑Ni=1 αT (i)\n # P = np.sum(F[:, -1])\n return F", "def parse_attention_example(tf_example):\n\n # specify features in attention example \n features_map = {\n 'sequence_raw': tf.FixedLenFeature([], tf.string),\n 'label_raw': tf.FixedLenFeature([], tf.string),\n 'annotation_raw': tf.FixedLenFeature([], tf.string)}\n\n # parse tf example for internal tensors\n parsed_example = tf.parse_single_example(tf_example, features_map)\n\n # decode examples\n sequence_raw = tf.decode_raw(parsed_example['sequence_raw'], tf.uint8)\n label_raw = tf.decode_raw(parsed_example['label_raw'], tf.uint8)\n annotation_raw = tf.decode_raw(parsed_example['annotation_raw'], tf.float32)\n\n # parsed tensors are flat so reshape if needed\n # cast to floats for attention task\n sequence = tf.cast(tf.reshape(sequence_raw, SEQUENCE_SHAPE), dtype=tf.float32)\n label = tf.cast(label_raw, dtype=tf.float32)\n annotation = tf.reshape(annotation_raw, ANNOTATION_SHAPE)\n\n return {'sequence': sequence, 'label': label, 'annotation': annotation}", "def forward(self, h_prev, x_t):\n hidden_con = np.concatenate((h_prev.T, x_t.T), axis=0)\n h_next = np.tanh((np.matmul(hidden_con.T, self.Wh)) + self.bh)\n y = self.softmax((np.matmul(h_next, self.Wy)) + self.by)\n return h_next, y", "def forward(self, enc_states, enc_len, dec_states):\n if self.precomputed_enc_h is None:\n\n self.precomputed_enc_h = self.mlp_enc(enc_states)\n self.mask = length_to_mask(\n enc_len, max_len=enc_states.size(1), device=enc_states.device\n )\n\n # multiply mask by 1/Ln for each row\n self.prev_attn = self.mask * (1 / enc_len.float()).unsqueeze(1)\n\n # compute location-aware features\n # [B, 1, L] -> [B, C, L]\n attn_conv = 
self.conv_loc(self.prev_attn.unsqueeze(1))\n # [B, C, L] -> [B, L, C] -> [B, L, F]\n attn_conv = self.mlp_loc(attn_conv.transpose(1, 2))\n\n dec_h = self.mlp_dec(dec_states.unsqueeze(1))\n attn = self.mlp_attn(\n torch.tanh(self.precomputed_enc_h + dec_h + attn_conv)\n ).squeeze(-1)\n\n # mask the padded frames\n attn = attn.masked_fill(self.mask == 0, -np.inf)\n attn = self.softmax(attn * self.scaling)\n\n # set prev_attn to current attn for the next timestep\n self.prev_attn = attn.detach()\n\n # compute context vectors\n # [B, 1, L] X [B, L, F]\n context = torch.bmm(attn.unsqueeze(1), enc_states).squeeze(1)\n context = self.mlp_out(context)\n\n return context, attn", "def feedForward(self, inputs):\n self.ai = np.array(inputs)\n self.ah1 = tanh(self.ai.dot(self.wi))\n self.ah2 = tanh(self.ah1.dot(self.wh))\n self.ao = softmax(self.ah2.dot(self.wo))", "def feedforward(self, inputs):\n # hidden activations\n # a_hidden = self.transfer(np.dot(self.w_input, inputs))\n a_hidden1 = self.transfer(np.dot(inputs, self.w_input))\n \n dots1 = (np.dot(a_hidden1, self.w_middle))\n a_hidden2 = self.transfer(np.asarray(dots1))\n \n #a_output = self.transfer(np.dot(self.w_output, a_hidden))\n dots2 = (np.dot(a_hidden2, self.w_output))\n a_output = self.transfer(np.asarray(dots2))\n \n return (a_hidden1, a_hidden2, a_output)", "def _forward_aftf(self, model_tf, model_af, teacher_forcing_ratio, src, tgt=None, hidden=None):\n\n\t\t\"\"\"\n\t\t\tArgs:\n\t\t\t\tsrc: list of src word_ids [batch_size, max_seq_len, word_ids]\n\t\t\t\ttgt: list of tgt word_ids\n\t\t\t\thidden: initial hidden state\n\t\t\tReturns:\n\t\t\t\tdecoder_outputs: list of step_output - log predicted_softmax [batch_size, 1, vocab_size_dec] * (T-1)\n\t\t\t\tret_dict\n\t\t\"\"\"\n\n\t\tif self.use_gpu and torch.cuda.is_available():\n\t\t\tglobal device\n\t\t\tdevice = torch.device('cuda')\n\t\telse:\n\t\t\tdevice = torch.device('cpu')\t\n\t\t\t\n\t\t# 00. init\n\t\tmodel_tf.batch_size = model_af.batch_size\n\t\tassert model_af.hidden_size_shared == model_tf.hidden_size_shared, \\\n\t\t\t'mismatch hidden_size_shared tf:af {}:{}'.format(model_tf.hidden_size_shared,model_af.hidden_size_shared)\n\t\tassert model_af.max_seq_len == model_tf.max_seq_len, \\\n\t\t\t'mismatch max_seq_len tf:af {}:{}'.format(model_tf.max_seq_len,model_af.max_seq_len)\n\t\tbatch_size = model_af.batch_size\n\t\thidden_size_shared = model_af.hidden_size_shared\n\t\tmax_seq_len = model_af.max_seq_len\n\n\t\t# 0. init var for af model\n\t\tret_dict = dict()\n\t\tret_dict[KEY_ATTN_SCORE] = []\n\t\tret_dict[KEY_ATTN_REF] = []\n\t\tret_dict[KEY_SEQUENCE] = []\n\t\tret_dict[KEY_LENGTH] = []\n\t\tdecoder_outputs = []\n\t\tsequence_symbols = []\n\t\tsequence_symbols_tf = []\n\n\t\t# 1,2. prep att keys & vals \n\t\tmask_src = src.data.eq(PAD)\t\t\n\t\temb_src_tf, emb_tgt_tf, att_keys_tf, att_vals_tf = model_tf.forward_prep_attkeys(src, tgt, hidden)\n\t\temb_src_af, emb_tgt_af, att_keys_af, att_vals_af = model_af.forward_prep_attkeys(src, tgt, hidden)\n\n\t\t# 3. init hidden states\n\t\tdec_hidden = None\n\n\t\t# 4. 
init for run dec + att + shared + output\n\t\tcell_value = torch.FloatTensor([0]).repeat(batch_size, 1, hidden_size_shared).to(device=device)\n\t\tprev_c = torch.FloatTensor([0]).repeat(batch_size, 1, max_seq_len).to(device=device) #dummy - for hybrid att only\n\n\t\ttgt_chunk_af = emb_tgt_af[:, 0].unsqueeze(1)\n\t\ttgt_chunk_tf = emb_tgt_tf[:, 0].unsqueeze(1)\n\t\tcell_value_af = cell_value\n\t\tcell_value_tf = cell_value\n\t\tprev_c_af = prev_c\n\t\tprev_c_tf = prev_c\n\t\tdec_hidden_af = dec_hidden\n\t\tdec_hidden_tf = dec_hidden\n\t\tlengths_tf = np.array([max_seq_len] * batch_size)\n\t\tlengths_af = np.array([max_seq_len] * batch_size)\n\n\n\t\t# 5. for loop over [w1 -> tf -> attref -> af -> w2]\n\t\t# note that when using att_tf; w1 does not change att scores: no effect on w2 generation at all\n\t\t# bracketd () parts are inactive parts \n\n\t\t# 5.0 to do tf or not\n\t\tuse_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\t\tfor idx in range(max_seq_len - 1):\n\n\t\t\t# 5.1 gen refatt: [TF] w1 -> tf -> att_tf (-> tf -> w2_tf)\n\t\t\tpredicted_softmax_tf, dec_hidden_tf, step_attn_tf, c_out_tf, cell_value_tf = \\\n\t\t\t\tmodel_tf.forward_step(att_keys_tf, att_vals_tf, tgt_chunk_tf, cell_value_tf, dec_hidden_tf, mask_src, prev_c_tf, use_gpu=self.use_gpu)\n\t\t\tstep_output_tf = predicted_softmax_tf.squeeze(1)\n\t\t\tsymbols_tf, lengths_tf, sequence_symbols_tf = model_tf.forward_decode(idx, step_output_tf, lengths_tf, sequence_symbols_tf)\n\t\t\tprev_c_tf = c_out_tf\n\t\t\t# import pdb; pdb.set_trace()\n\n\t\t\t# 5.2 detach refatt\n\t\t\tstep_attn_ref_detach = step_attn_tf.detach() \n\t\t\tstep_attn_ref_detach = step_attn_ref_detach.type(torch.FloatTensor).to(device=device)\n\t\t\tret_dict[KEY_ATTN_REF].append(step_attn_ref_detach)\n\t\t\t# import pdb; pdb.set_trace()\n\n\t\t\t# 5.3 gen word prediction: [AF] (w1 -> af -> att_af) att_tf -> af -> w2_af\n\t\t\tpredicted_softmax_af, dec_hidden_af, step_attn_af, c_out_af, cell_value_af = \\\n\t\t\t\tmodel_af.forward_step(att_keys_af, att_vals_af, \n\t\t\t\t\ttgt_chunk_af, cell_value_af, dec_hidden_af, mask_src, prev_c_af, att_ref=step_attn_ref_detach, use_gpu=self.use_gpu)\n\t\t\tstep_output_af = predicted_softmax_af.squeeze(1)\n\t\t\tsymbols_af, lengths_af, sequence_symbols = model_tf.forward_decode(idx, step_output_af, lengths_af, sequence_symbols)\n\t\t\tprev_c_af = c_out_af\n\t\t\t# import pdb; pdb.set_trace()\n\n\t\t\t# 5.4 store var for af model\n\t\t\tret_dict[KEY_ATTN_SCORE].append(step_attn_af)\n\t\t\tdecoder_outputs.append(step_output_af)\n\n\t\t\t# 5.5 set w2 as w2_af \n\t\t\tif use_teacher_forcing:\n\t\t\t\ttgt_chunk_af = emb_tgt_af[:, idx+1].unsqueeze(1)\n\t\t\t\ttgt_chunk_tf = emb_tgt_tf[:, idx+1].unsqueeze(1)\n\t\t\telse:\n\t\t\t\ttgt_chunk_af = model_af.embedder_dec(symbols_af)\n\t\t\t\ttgt_chunk_tf = model_tf.embedder_dec(symbols_af)\n\t\t\t# if idx < 5:\n\t\t\t# \timport pdb; pdb.set_trace()\n\n\t\t# print('...')\n\t\tret_dict[KEY_SEQUENCE] = sequence_symbols\n\t\tret_dict[KEY_LENGTH] = lengths_af.tolist()\n\n\t\treturn decoder_outputs, dec_hidden_af, ret_dict", "def lstm_atten(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.5))\n model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n # model.add(Dropout(0.5))\n\n attention = Dense(1, activation='tanh')(activations)\n attention = Flatten()(attention)\n attention = Activation('softmax')(attention)\n attention = 
RepeatVector(2048)(attention)\n attention = Permute([2, 1])(attention)\n\n sent_representation = concatenate([activations, attention], mode='mul')\n sent_representation = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(2048,))(sent_representation)\n\n probabilities = Dense(self.nb_classes, activation='softmax')(sent_representation)\n\n model = model(input=self.input_shape, output=probabilities )\n\n dense1800 = Dense(4096, activation='relu')\n\n #dense1800 = Dense(1800, activation='relu', kernel_regularizer=regularizers.l2(0.01))(inputs)\n attention_probs = Dense(4096, activation='sigmoid', name='attention_probs')(dense1800)\n attention_mul = multiply([dense1800, attention_probs], name='attention_mul')\n dense7 = Dense(self.nb_classes, kernel_regularizer=regularizers.l2(0.01), activation='softmax')(attention_mul)\n model = Model(input=[self.input_shape], output=dense7)\n return model", "def forward(self, queries, keys, mask=None, attn_prior=None, speaker_embed=None):\n if speaker_embed is not None:\n keys = keys + self.key_spk_proj(speaker_embed.unsqueeze(1).expand(-1, keys.shape[-1], -1)).transpose(1, 2)\n queries = queries + self.query_spk_proj(speaker_embed.unsqueeze(1).expand(-1, queries.shape[-1], -1)).transpose(1, 2)\n keys_enc = self.key_proj(keys)\n queries_enc = self.query_proj(queries)\n attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2\n attn = -self.temperature * attn.sum(1, keepdim=True)\n if attn_prior is not None:\n attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + 1e-08)\n attn_logprob = attn.clone()\n if mask is not None:\n attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2), -float('inf'))\n attn = self.softmax(attn)\n return attn, attn_logprob", "def __call__(self, x, is_training, nfilt=32, reuse=False):\n with tf.variable_scope(self.name):\n x = tf.reshape(x, [-1, self.input_dim, self.input_dim, self.channels])\n\n # attnh1 = unet_conv(x, nfilt*1, 'attnh1', reuse, is_training, use_batch_norm=False)\n # attn1 = unet_conv_t(attnh1, None, 1, 'attn1', reuse, is_training, activation=tf.nn.tanh)\n\n # attnh2 = unet_conv(attnh1, nfilt*2, 'attnh2', reuse, is_training)\n # attn2 = unet_conv_t(attnh2, None, nfilt*2, 'attn2_1', reuse, is_training)\n # attn2 = unet_conv_t(attn2, None, 1, 'attn2_2', reuse, is_training, activation=tf.nn.tanh)\n\n # attnh3 = unet_conv(attnh2, nfilt*4, 'attnh3', reuse, is_training)\n # attn3 = unet_conv_t(attnh3, None, nfilt*4, 'attn3_1', reuse, is_training)\n # attn3 = unet_conv_t(attn3, None, nfilt*2, 'attn3_2', reuse, is_training)\n # attn3 = unet_conv_t(attn3, None, 1, 'attn3_3', reuse, is_training, activation=tf.nn.tanh)\n\n # salience = tf.concat([attn1, attn2, attn3], 3)\n # salience = conv(salience, 1, 'salience', s=1, reuse=reuse)\n # salience = tf.reshape(salience, (-1, self.input_dim*self.input_dim*1))\n # salience = tf.nn.softmax(salience)\n # salience = tf.reshape(salience, (-1, self.input_dim,self.input_dim,1))\n\n h1 = unet_conv(x, nfilt*1, 'h1', reuse, is_training, use_batch_norm=False)\n h2 = unet_conv(h1, nfilt*2, 'h2', reuse, is_training)\n h3 = unet_conv(h2, nfilt*4, 'h3', reuse, is_training)\n out = unet_conv(h3, 1, 'out', reuse, is_training, use_batch_norm=False, activation=None)\n\n return out", "def forward(self, adj, features, inference=False):\n\n # if inference:\n #\n # # Generate the posterior embeddings\n # self.z = self.h_mean + randn_like(self.h_mean) * exp(self.h_log_std)\n #\n # else:\n\n # Perform the GNN layer that is shared for both the mean and the std layers\n h = 
F.relu(self.conv_shared(adj, features))\n h = self.conv_dropout_1(h)\n\n # Perform the GNN layer to obtain embedding means\n self.h_mean = self.conv_mean(adj, h)\n\n # Perform the GNN layer to obtain embeddings std\n self.h_log_std = self.conv_log_std(adj, h)\n\n # Generate the posterior embeddings\n self.z = self.h_mean + randn_like(self.h_mean) * exp(self.h_log_std)\n\n # Reconstruct the graph\n reconstruction = matmul(self.z, transpose(self.z, 0, 1))\n\n return self.output_func(reconstruction)", "def forward(self, input_, context, pos_emb, mask_tgt, mask_src, mems=None,\n incremental=False, incremental_cache=None):\n # incremental=False, incremental_cache=None, reuse_source=True):\n assert context is None, \"This model does not have an context encoder\"\n\n coin = True\n if self.training and self.death_rate > 0:\n coin = (torch.rand(1)[0].item() >= self.death_rate)\n\n if coin:\n # input and context should be time first ?\n query = self.preprocess_attn(input_)\n\n if mems is not None and mems.size(0) > 0:\n mems = self.preprocess_attn(mems)\n else:\n mems = None\n\n # out, _ = self.multihead_tgt(query, pos_emb, r_w_bias, r_r_bias, attn_mask=mask_tgt)\n out, _, incremental_cache = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt, mems=mems,\n incremental=incremental, incremental_cache=incremental_cache)\n\n # rescaling before residual\n if self.training and self.death_rate > 0:\n out = out / (1 - self.death_rate)\n\n input_ = self.postprocess_attn(out, input_)\n\n \"\"\" Context Attention layer \n layernorm > attn > dropout > residual\n \"\"\"\n\n coverage = None\n\n \"\"\" Feed forward layer \n layernorm > ffn > dropout > residual\n \"\"\"\n out = self.feedforward(self.preprocess_ffn(input_))\n\n # rescaling before residual\n if self.training and self.death_rate > 0:\n out = out / (1 - self.death_rate)\n\n input_ = self.postprocess_ffn(out, input_)\n else:\n coverage = None\n\n if incremental:\n return input_, coverage, incremental_cache\n\n return input_, coverage", "def forward(self, h_prev, x_t):\r\n m, i = x_t.shape\r\n _, h = h_prev.shape\r\n x_ht = np.hstack((h_prev, x_t))\r\n h_next = np.tanh(np.matmul(x_ht, self.Wh) + self.bh)\r\n y_n = np.matmul(h_next, self.Wy) + self.by\r\n y = self.softmax(y_n)\r\n return (h_next, y)", "def forward(self, x, h, u, time, feat_kernels_enc_conv, feat_bias_enc_conv, feat_kernels_enc_fc, feat_bias_enc_fc, feat_kernels_enc_3dgru, feat_bias_enc_3dgru):\n\n\n conv1a_wt,conv1b_wt,conv2a_wt,conv2b_wt,conv2c_wt,conv3a_wt,conv3b_wt,conv3c_wt,conv4a_wt,conv4b_wt,conv5a_wt,conv5b_wt,conv5c_wt,conv6a_wt,conv6b_wt = feat_kernels_enc_conv\n conv1a_bias,conv1b_bias,conv2a_bias,conv2b_bias,conv2c_bias,conv3a_bias,conv3b_bias,conv3c_bias,conv4a_bias,conv4b_bias,conv5a_bias,conv5b_bias,conv5c_bias,conv6a_bias,conv6b_bias = feat_bias_enc_conv\n t_x_s_update_fc_layer, t_x_s_update_conv3d, t_x_s_reset_fc_layer, t_x_s_reset_conv3d, t_x_rs_fc_layer, t_x_rs_conv3d = feat_kernels_enc_3dgru\n t_x_s_update_bias, t_x_s_reset_bias, t_x_rs_bias = feat_bias_enc_3dgru\n\n conv1a = F.conv2d(x, conv1a_wt, bias=conv1a_bias, padding=3) #self.conv1a(x)\n rect1a = self.leaky_relu(conv1a)\n conv1b = F.conv2d(rect1a, conv1b_wt, bias=conv1b_bias, padding=1) #self.conv1b(rect1a)\n rect1 = self.leaky_relu(conv1b)\n pool1 = self.pool(rect1)\n \n \n conv2a = F.conv2d(pool1, conv2a_wt, bias=conv2a_bias, padding=1) #self.conv2a(pool1)\n rect2a = self.leaky_relu(conv2a)\n conv2b = F.conv2d(rect2a, conv2b_wt, bias=conv2b_bias, padding=1) #self.conv2b(rect2a)\n rect2 = 
self.leaky_relu(conv2b)\n conv2c = F.conv2d(pool1, conv2c_wt, bias=conv2c_bias) #self.conv2c(pool1)\n res2 = conv2c + rect2\n pool2 = self.pool(res2)\n \n \n conv3a = F.conv2d(pool2, conv3a_wt, bias=conv3a_bias, padding=1) #self.conv3a(pool2)\n rect3a = self.leaky_relu(conv3a)\n conv3b = F.conv2d(rect3a, conv3b_wt, bias=conv3b_bias, padding=1) #self.conv3b(rect3a)\n rect3 = self.leaky_relu(conv3b)\n conv3c = F.conv2d(pool2, conv3c_wt, bias=conv3c_bias) #self.conv3c(pool2)\n res3 = conv3c + rect3\n pool3 = self.pool(res3)\n \n conv4a = F.conv2d(pool3, conv4a_wt, bias=conv4a_bias, padding=1) #self.conv4a(pool3)\n rect4a = self.leaky_relu(conv4a)\n conv4b = F.conv2d(rect4a, conv4b_wt, bias=conv4b_bias, padding=1) #self.conv4b(rect4a)\n rect4 = self.leaky_relu(conv4b)\n pool4 = self.pool(rect4)\n \n \n conv5a = F.conv2d(pool4, conv5a_wt, bias=conv5a_bias, padding=1) #self.conv5a(pool4)\n rect5a = self.leaky_relu(conv5a)\n conv5b = F.conv2d(rect5a, conv5b_wt, bias=conv5b_bias, padding=1) #self.conv5b(rect5a)\n rect5 = self.leaky_relu(conv5b)\n conv5c = F.conv2d(pool4, conv5c_wt, bias=conv5c_bias) #self.conv5c(pool4)\n res5 = conv5c + rect5\n pool5 = self.pool(res5)\n \n \n conv6a = F.conv2d(pool5, conv6a_wt, bias=conv6a_bias, padding=1) #self.conv6a(pool5)\n rect6a = self.leaky_relu(conv6a)\n conv6b = F.conv2d(rect6a, conv6b_wt, bias=conv6b_bias, padding=1) #self.conv6b(rect6a)\n rect6 = self.leaky_relu(conv6b)\n res6 = pool5 + rect6\n pool6 = self.pool(res6)\n \n \n pool6 = pool6.view(pool6.size(0), -1)\n \n \n fc7 = F.linear(pool6, feat_kernels_enc_fc[0], bias=feat_bias_enc_fc[0]) #self.fc7(pool6)\n rect7 = self.leaky_relu(fc7)\n \n t_x_s_update = self.t_x_s_update(rect7, h, t_x_s_update_fc_layer, t_x_s_update_conv3d, t_x_s_update_bias)\n t_x_s_reset = self.t_x_s_reset(rect7, h, t_x_s_reset_fc_layer, t_x_s_reset_conv3d, t_x_s_reset_bias)\n \n update_gate = self.sigmoid(t_x_s_update)\n complement_update_gate = 1 - update_gate\n reset_gate = self.sigmoid(t_x_s_reset)\n \n rs = reset_gate * h\n t_x_rs = self.t_x_rs(rect7, rs, t_x_rs_fc_layer, t_x_rs_conv3d, t_x_rs_bias)\n tanh_t_x_rs = self.tanh(t_x_rs)\n \n gru_out = update_gate * h + complement_update_gate * tanh_t_x_rs\n \n return gru_out, update_gate", "def sequential_sar_decoder__2d_attention(self,\n y_prev,\n feat,\n holistic_feat,\n hx1,\n cx1,\n hx2,\n cx2,\n valid_ratios=None):\n _, _, h_feat, w_feat = feat.size()\n if self.dec_gru:\n hx1 = cx1 = self.rnn_decoder_layer1(y_prev, hx1)\n hx2 = cx2 = self.rnn_decoder_layer2(hx1, hx2)\n else:\n # has replaced LSTMCell with LSTM, forward func need rewrite\n _, (hx1,\n cx1) = self.rnn_decoder_layer1(y_prev.unsqueeze(0), (hx1, cx1))\n _, (hx2, cx2) = self.rnn_decoder_layer2(hx1, (hx2, cx2))\n\n tile_hx2 = hx2.view(hx2.size(1), hx2.size(-1), 1, 1)\n attn_query = self.conv1x1_1(tile_hx2) # bsz * attn_size * 1 * 1\n attn_query = attn_query.expand(-1, -1, h_feat, w_feat)\n attn_key = self.conv3x3_1(feat)\n attn_weight = torch.tanh(torch.add(attn_key, attn_query, alpha=1))\n attn_weight = self.conv1x1_2(attn_weight)\n bsz, c, h, w = attn_weight.size()\n assert c == 1\n\n if valid_ratios is not None:\n # cal mask of attention weight\n attn_mask = torch.zeros(bsz, c, h, w + 1).to(attn_weight.device)\n for i, valid_ratio in enumerate(valid_ratios):\n # use torch.ceil to replace original math.ceil and if else in mmocr\n valid_width = torch.tensor(w * valid_ratio).ceil().long()\n # use narrow to replace original [valid_width:] in mmocr\n attn_mask[i].narrow(2, valid_width, w + 1 - valid_width)[:] = 1\n 
attn_mask = attn_mask[:, :, :, :w]\n attn_weight = attn_weight.masked_fill(attn_mask.bool(), float('-inf'))\n\n attn_weight = F.softmax(attn_weight.view(bsz, -1), dim=-1)\n attn_weight = attn_weight.view(bsz, c, h, w)\n\n attn_feat = torch.sum(\n torch.mul(feat, attn_weight), (2, 3), keepdim=False) # n * c\n\n # linear transformation\n if self.pred_concat:\n y = self.prediction(torch.cat((hx2[0], attn_feat, holistic_feat), 1))\n else:\n y = self.prediction(attn_feat)\n\n return y, hx1, hx1, hx2, hx2", "def feedforward(self, inputs):\n # hidden activations\n # a_hidden = self.transfer(np.dot(self.w_input, inputs))\n a_hidden = self.transfer(np.dot(inputs, self.w_input))\n \n #a_output = self.transfer(np.dot(self.w_output, a_hidden))\n dots = (np.dot(a_hidden, self.w_output))\n a_output = self.transfer(np.asarray(dots))\n\n return (a_hidden, a_output)", "def model_1exp_fft(ns, a, t, s, t0=0):\n a, t, s = physicond(a, t, s)\n\n # auxilary function taking as argument a time array.\n def aux(nu_array):\n rising = a*s / (1j * 2*np.pi * s*nu_array + 1)\n decaying = a*t / (1j * 2*np.pi * t*nu_array + 1)\n offset_phase = np.exp(-1j * 2*np.pi * t0*nu_array)\n return (decaying - rising) * offset_phase * ns\n\n return aux", "def forward(self, hs, ds, h_masks=None, d_masks=None):\n B = ds.size(0)\n device = ds.device\n\n if ds.sum() == 0:\n logging.warning(\n \"predicted durations includes all 0 sequences. \"\n \"fill the first element with 1.\"\n )\n # NOTE(kan-bayashi): This case must not be happened in teacher forcing.\n # It will be happened in inference with a bad duration predictor.\n # So we do not need to care the padded sequence case here.\n ds[ds.sum(dim=1).eq(0)] = 1\n\n if h_masks is None:\n T_feats = ds.sum().int()\n else:\n T_feats = h_masks.size(-1)\n t = torch.arange(0, T_feats).unsqueeze(0).repeat(B, 1).to(device).float()\n if h_masks is not None:\n t = t * h_masks.float()\n\n c = ds.cumsum(dim=-1) - ds / 2\n energy = -1 * self.delta * (t.unsqueeze(-1) - c.unsqueeze(1)) ** 2\n if d_masks is not None:\n energy = energy.masked_fill(\n ~(d_masks.unsqueeze(1).repeat(1, T_feats, 1)), -float(\"inf\")\n )\n\n p_attn = torch.softmax(energy, dim=2) # (B, T_feats, T_text)\n hs = torch.matmul(p_attn, hs)\n return hs", "def pre_forward(self, *args, **kwargs):\n batch_size = args[0].shape[0]\n if not self.instantiated:\n self.hidden_dim = args[0].shape[-1]\n self.instantiate(hidden_dim=self.hidden_dim)\n if self.past_key_reparam is None:\n past_key = self.past_key\n else:\n past_key = self.past_key_reparam\n if self.past_value_reparam is None:\n past_value = self.past_value\n else:\n past_value = self.past_value_reparam\n\n\n def expand_batchsize(x):\n x = x.reshape(self.prefix_token_num, self.num_heads, -1).transpose(0,1)\n x = x.unsqueeze(0).expand(batch_size, *x.shape)\n return x\n # from IPython import embe\n\n if 'past_key_value' not in kwargs or kwargs['past_key_value'] is None:\n kwargs['past_key_value'] = (expand_batchsize(past_key), expand_batchsize(past_value))\n\n if 'attention_mask' in kwargs and kwargs['attention_mask'] is not None:\n am = kwargs['attention_mask'] # Should check the format of the attention_mask when moving to a new plm.\n kwargs['attention_mask'] = torch.cat([-torch.zeros((*am.shape[:-1],self.prefix_token_num), dtype = am.dtype,device=am.device), am], dim=-1)\n elif len(args) >1: # attention mask is passed via positional argument\n am = args[1]\n am = torch.cat([-torch.zeros((*am.shape[:-1],self.prefix_token_num), dtype = am.dtype,device=am.device), am], dim=-1)\n args 
= (args[0], am) + args[2:]\n # from IPython import embed\n # embed(header = \"Herein prefixroberta\")\n return args, kwargs", "def linear_activation_forward(A_prev, W, b, activation):\n pass", "def forward(self, input, target, device0=None, weight=None, attenuate=2.0):\n if device0 is None:\n device1 = torch.device('cpu')\n else:\n device1 = device0\n input_soft = F.softmax(input, dim=1)\n input_logsoft = F.log_softmax(input, dim=1)\n batch = target.size()[0]\n target_mask = target.reshape(-1, 1)\n input_soft = torch.gather(input_soft, 1, target_mask)\n input_logsoft = torch.gather(input_logsoft, 1, target_mask)\n if weight is None:\n weight_tensor = torch.tensor([1] * batch, device=device1)\n else:\n weight_tensor = weight.repeat(batch, 1).to(device=device1)\n weight_tensor = torch.gather(weight_tensor, 1, target_mask)\n weight_tensor = weight_tensor.reshape(-1, 1)\n focal_weight = weight_tensor * torch.pow(1.0 - input_soft, attenuate)\n # print('focal loss coeff:' + str(focal_weight))\n loss = (-1) * focal_weight * input_logsoft\n loss = torch.mean(loss, dim=0)\n\n\n return loss, focal_weight", "def forward(self, x):\n # BertSelfAttention returns a tuple, so when testing\n # just this layer, we need to take the first element\n # (which is the element that contains the logits)\n if type(x) == tuple:\n x = x[0]\n if len(x.shape) == 1:\n return x[: int(len(x) / 2)] + x[int(len(x) / 2) :]\n elif len(x.shape) == 2:\n result = torch.empty((x.shape[0], int(x.shape[1] / 2)))\n for i, row in enumerate(x):\n result[i] = x[i][: int(len(x[i]) / 2)] + x[i][int(len(x[i]) / 2) :]\n\n if x.is_cuda:\n # print(\"********\")\n result = result.cuda()\n return result\n elif len(x.shape) == 3:\n result = torch.empty((x.shape[0], x.shape[1], int(x.shape[2] / 2)))\n for i, row in enumerate(x):\n for j, col in enumerate(x[i]):\n result[i][j] = (\n x[i][j][: int(len(x[i][j]) / 2)]\n + x[i][j][int(len(x[i][j]) / 2) :]\n )\n\n if x.is_cuda:\n result = result.cuda()\n return result", "def forward(self,\n input,\n hidden,\n encoder_outputs):\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n # attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\n attn_state = hidden[0] if isinstance(hidden, tuple) else hidden\n attn_weights = F.softmax(attn_state[0] @ encoder_outputs.squeeze().t(), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.permute(1, 0, 2))\n\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.rnn(output, hidden)\n\n output = F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights", "def forward(self, tgt, memory, tgt_mask=None, memory_mask=None):\n # m = memory\n # LayerNorm + SelfAtt + Dropout + Residual\n tgt = self.sublayer[0](tgt, lambda x: self.self_attn(x, x, x, tgt_mask))\n # LayerNorm + Att + Dropout + Residual\n tgt = self.sublayer[1](tgt, lambda x: self.memory_attn(x, memory, memory, memory_mask))\n # LayerNorm + FF + Dropout +Residual\n layer_output = self.sublayer[2](tgt, self.feed_forward)\n return layer_output", "def __init__(self, cfg):\n super(MHBCoAtt, self).__init__()\n self.cfg = cfg\n # word embedding: q_vocab_size, 1024\n self.word_embedding = nn.Embedding(cfg.q_vocab_size, cfg.emb_dim)\n # LSTM\n if cfg.glove:\n self.lstm = nn.LSTM(input_size=cfg.emb_dim*2,\n hidden_size=cfg.hidden_dim,\n num_layers=cfg.num_layers,\n batch_first=True)\n else:\n self.lstm = 
nn.LSTM(input_size=cfg.emb_dim,\n hidden_size=cfg.hidden_dim,\n num_layers=cfg.num_layers,\n batch_first=True)\n\n self.dropout_l = nn.Dropout(p = 0.3)\n # question attention\n self.ques_att_conv1 = nn.Conv2d(cfg.hidden_dim, 512, [1,1])\n self.ques_att_conv2 = nn.Conv2d(512, 2, [1,1])\n\n # question attentive feature fuse with image feature, according to paper: k * o = 5000, k = 5\n self.ques_proj1 = nn.Linear(2*cfg.hidden_dim, 5000)\n self.img_conv1d = nn.Conv2d(cfg.img_feature_channel, 5000, [1, 1])\n self.dropout_m = nn.Dropout(p = 0.1)\n\n # co-attention conv layers\n self.co_att_conv1 = nn.Conv2d(1000, 512, [1,1])\n self.co_att_conv2 = nn.Conv2d(512, 2, [1,1])\n\n # co_attentive feature fuse with question attentive feature\n self.ques_proj2 = nn.Linear(2*cfg.hidden_dim, 5000)\n self.ques_proj3 = nn.Linear(2*cfg.hidden_dim, 5000)\n self.img_proj2 = nn.Linear(2*cfg.img_feature_channel, 5000)\n self.img_proj3 = nn.Linear(2*cfg.img_feature_channel, 5000)\n\n # prediction fully connected layer\n self.linear_pred = nn.Linear(2000, cfg.a_vocab_size)", "def forward(self, trg, enc_src, trg_mask, src_mask):\n\n # trg = [batch size, trg len, hid dim]\n # enc_src = [batch size, src len, hid dim]\n # trg_mask = [batch size, trg len]\n # src_mask = [batch size, src len]\n\n # self attention\n _trg, _ = self.self_attention(trg, trg, trg, trg_mask)\n\n # dropout, residual connection and layer norm\n trg = self.self_attn_layer_norm(trg + self.dropout(_trg))\n\n # trg = [batch size, trg len, hid dim]\n\n # encoder attention\n _trg, attention = self.encoder_attention(trg, enc_src, enc_src, src_mask)\n\n # dropout, residual connection and layer norm\n trg = self.enc_attn_layer_norm(trg + self.dropout(_trg))\n\n # trg = [batch size, trg len, hid dim]\n\n # positionwise feedforward\n _trg = self.positionwise_feedforward(trg)\n\n # dropout, residual and layer norm\n trg = self.ff_layer_norm(trg + self.dropout(_trg))\n\n # trg = [batch size, trg len, hid dim]\n # attention = [batch size, n heads, trg len, src len]\n\n return trg, attention", "def local_attention(inputs, num_filters, filter_size=5, initializer=None, reuse=None,\n name=''):\n print(\"Local Attentional\")\n weighted_inputs, _ = convolutional_attention(inputs, filter_size=filter_size,\n initializer=initializer, name='local{}'.format(name),\n reuse=reuse)\n conv_output = build_cnn(weighted_inputs, num_filters, filter_sizes=3,\n initializer=initializer, name='local{}'.format(name),\n reuse=reuse)\n print(conv_output)\n return conv_output", "def forward(self,\n spatial_x: Tensor = None,\n semantic_x: Tensor = None,\n memory: Tensor = None,\n src_mask: Tensor = None,\n trg_mask: Tensor = None) -> Tensor:\n # decoder/target self-attention\n\n spatial_x_norm = self.spa_layer_norm(spatial_x) #64\n semantic_x_norm = self.x_layer_norm(semantic_x) #256\n\n # layout attention\n\n h1 = self.trg_trg_att(spatial_x_norm, spatial_x_norm, spatial_x_norm, mask=trg_mask)\n h1 = self.dropout(h1) + spatial_x\n\n# h2 = self.src_trg_att(memory, memory, semantic_x_norm, mask=src_mask)\n# h2 = self.dropout(h2)\n \n o1 = self.feed_forward_h1(h1)\n o2 = memory[:,1:,:]\n o = torch.cat((o2, o1), dim=-1)\n \n return o" ]
[ "0.6917411", "0.67771304", "0.67337334", "0.64585733", "0.6420011", "0.63664645", "0.63517004", "0.63190645", "0.6295559", "0.62826264", "0.6252782", "0.6198423", "0.61942375", "0.6190378", "0.6135776", "0.61074287", "0.6046608", "0.598789", "0.59875965", "0.59709185", "0.5965805", "0.5960066", "0.5955516", "0.59358186", "0.59261125", "0.5925161", "0.59162337", "0.5914254", "0.5894426", "0.58943933", "0.58846354", "0.5871415", "0.5841751", "0.5825398", "0.58060753", "0.5804853", "0.58026344", "0.5798732", "0.57797045", "0.57770944", "0.57767326", "0.57727987", "0.5757793", "0.5756472", "0.5749353", "0.57472867", "0.57461935", "0.5741191", "0.57316273", "0.5716489", "0.5712713", "0.5710979", "0.57096434", "0.56919503", "0.56814057", "0.5678729", "0.56770796", "0.5662596", "0.56609726", "0.56581813", "0.5657479", "0.56565493", "0.564723", "0.56418663", "0.56348693", "0.56341904", "0.56292474", "0.56265056", "0.56238526", "0.56212085", "0.5611699", "0.55966765", "0.5593155", "0.5592373", "0.55737454", "0.5564893", "0.55647635", "0.5559779", "0.5553892", "0.5546608", "0.55319905", "0.5526382", "0.55237484", "0.5520523", "0.5519058", "0.55084825", "0.5504062", "0.5498809", "0.54949826", "0.54905254", "0.5476797", "0.54754955", "0.5474404", "0.5462246", "0.54573953", "0.5449942", "0.54495126", "0.54451406", "0.5443449", "0.5442396" ]
0.5559464
78
Actions before each test.
def setUp(self):\n    self.brow = webdriver.Firefox()\n    staging_server = os.environ.get('STAGING_SERVER')\n    if staging_server:\n        self.live_server_url = "http://" + staging_server
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def before_test(self, func, *args, **kwargs):\n pass", "def do_before(self):\r\n pass", "def beforeTest(self, test):\n self.setupLoghandler()", "def before(self) -> None:\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\r\n pass # nothing used by all\r", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def setUp(self):\n pass #because we dont have anything to setup.", "def before_each_test(self, request):\n self.test_counter = Counter()\n self.check_ref = request.config.getvalue(\"check_ref\")\n self.create_ref = request.config.getvalue(\"create_ref\")", "def setUp(self):\n \n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self) :\n pass", "def setUp(self):\n\n return", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def setUp(self):\r\n pass # nothing required by all\r", "def setUp(self) -> None:\n pass", "def setUp(self) -> None:\n pass", "def startTestHook(self):", "def setUp(self):\n MainTests.setUp(self)", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self):\r\n pass", "def setUp(self):\n\n BaseTest.setUp(self)", "def setUp(self):\n setUp()", "def setUp(self):\n assert COMMANDS.keys() == EXPCT_RESULTS.keys()\n self.tests = []\n self.test_numbers = deque(sorted(COMMANDS.keys()))", "def setUp(self):\n self.setup_beets()", "def run_before(self):\n\n for path in self.hooks.get('before', []):\n self.run_module(path)", "def setUp(self):\n print('Calling \\'setUp\\'')", "def setUpClass(self):\n\n self.test_a_summarize()\n self.test_b_store()\n self.test_c_get_existing_records()\n self.test_d_remove_database()", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUpTestCase(self):\n pass", "def setUp(self):\n\t\tself.testCases = [\n\t\t\t{\n\t\t\t\t'show': \"House\",\n\t\t\t\t'episode': 11,\n\t\t\t\t'season': 3,\n\t\t\t\t'title': \"Words and Deeds\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Lost\",\n\t\t\t\t'episode': 21,\n\t\t\t\t'season': 2,\n\t\t\t\t'title': \"?\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Heroes\",\n\t\t\t\t'episode': 15,\n\t\t\t\t'season': 1,\n\t\t\t\t'title': \"Run!\"\n\t\t\t}\n\t\t]", "def setUp(self) -> None:\n super().setUp()\n\n self.test_action = TestAction()\n self.test_header_action = TestHeaderAction()\n self.test_menu_action = TestMenuAction()\n self.test_menu_item_action = TestMenuItemAction()", "def test_before(self):\n\n support.create_project(self, 'candice')\n 
support.add_step(self)\n support.add_step(self, position='0')\n\n project = cauldron.project.get_internal_project()\n steps = project.steps\n\n self.assertTrue(steps[0].filename.startswith('S01'))\n self.assertTrue(steps[1].filename.startswith('S02'))", "def setUp(self):\n test_env_setup()", "def startTestRun(self):", "def setUp(self):\n\n self._set_up()", "def setUp(self) -> None:\n return super().setUp()", "def setUp(self):\n print(\"\\nIn setUp()...\")", "def setUp(self) -> None:\n create_test_categories()", "def setUp(self):\n super(BasicTestCase, self).setUp()", "def before(self, context):\n raise NotImplementedError", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_RMT_Util:\", self._testMethodName)", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def test_begin(self):", "def setUp(self):\n print 'unittest.setUp()'\n pass", "def setUp(self):\n # use self.attribute to keep anything which needs to be accessed later\n print('setUp method\\n')", "def setUp(self):\n raise NotImplementedError", "def setUp_extra(self):\n pass", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def test_run_started(self):", "def setUp(self):\r\n\r\n # Get the Flask test client\r\n self.client = app.test_client()\r\n\r\n # Show Flask errors that happen during tests\r\n app.config['TESTING'] = True", "def setUp(self):\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n self.headers = list()\n self.status = list()", "def pytest_before_group_items(session, config, items):", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n\n # ISSUE007\n # TODO, pyunit's bright idea is to call setup before each test. It\n # was defining multiple patterns which was annoying but not a problem.\n # The cleanest way to do things is probably to remove patterns after\n # the test, but we don't have that functionality. For now just create\n # one pattern to avoid confusion, but do it by hacking in a global\n # variable\n\n global firstTime\n\n if not firstTime:\n return\n firstTime = False\n\n # get the full source name for even and odd sources\n out_of_order_numbers = quilt_test_core.get_source_name(\n \"out_of_order_numbers\")\n\n # TODO REad the pattern id from the std output then query that one\n # See ISSUE007 and ISSUE008\n # call quilt_define with the pattern code and name query\n # dups_follows\n quilt_test_core.call_quilt_script('quilt_define.py', ['-n',\n 'out_of_order',\n 'source(\"' + out_of_order_numbers + '\",\"grep\")'])", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n # Get the Flask test client\n self.client = app.test_client()\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True" ]
[ "0.7848565", "0.7659674", "0.7591663", "0.7281655", "0.7241673", "0.7141255", "0.70754164", "0.7027952", "0.7027952", "0.7026638", "0.70223314", "0.6992696", "0.69685715", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6968439", "0.6962401", "0.69473654", "0.6943962", "0.69378036", "0.6929757", "0.6929757", "0.69093055", "0.6905022", "0.6900968", "0.6900968", "0.68944275", "0.6890875", "0.68860793", "0.68751407", "0.68577385", "0.6820632", "0.6790215", "0.6763471", "0.67596936", "0.67596936", "0.67596936", "0.67596936", "0.67596936", "0.67596936", "0.67596936", "0.67596936", "0.67596936", "0.6742395", "0.6739187", "0.6716386", "0.6709117", "0.66807485", "0.6660571", "0.6654884", "0.663257", "0.6627491", "0.661747", "0.6581771", "0.65786153", "0.65595317", "0.65383077", "0.65383077", "0.65377456", "0.6511088", "0.6496308", "0.6481446", "0.64503133", "0.6449556", "0.6449556", "0.6449556", "0.6445779", "0.64384484", "0.64186496", "0.6414822", "0.64133334", "0.6411665", "0.6411665", "0.64110166", "0.6408956", "0.6408956", "0.64068896" ]
0.0
-1
Actions after each test.
def tearDown(self):\n    self.brow.quit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after_test(self, test_results):\n pass", "def after_test(self, func, *args, **kwargs):\n pass", "def after(self):\n pass", "def after(self):\n pass", "def after_all(self) -> None:", "def finished_tests(self):\n self.testing = 0", "def tearDown(self):\n\t\tprint(\"end test\")\n\t\tpass", "def on_test_end(self):\n for callback in self.callbacks:\n callback.on_test_end(self, self.get_model())", "def after(self) -> None:\n pass", "def test_finished(self):\n\n # We'll start the next test in an idle, so that the current one is\n # properly terminated, and we do not execute in its context\n\n GLib.idle_add(self._do_test)", "def tearDown(self):\n pass\n # teardown called after each test\n # e.g. maybe write test results to some text file", "def do_after(self):\r\n pass", "def on_test_end(self, logs=None):", "def test_after_install(self):\n self.run_test_suites(self.after_install_test_suite_list)", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def finished_tests(self):\n self.testing = 0\n if not self.closing:\n self.enable_menus(1)\n self.parent.finished_tests()", "def test_run_ended(self):", "def tearDown(self):\n self.teardown_beets()", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def tearDown(self):\r\n testing.tearDown()", "def tearDown(self):\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\n print('Calling \\'tearDown\\'')", "def tearDown(self) :\n pass", "def tearDown(self) :\n pass", "def tearDown(self) :\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def 
tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass" ]
[ "0.83760184", "0.79627854", "0.76740867", "0.76740867", "0.764812", "0.75801444", "0.7479539", "0.7362819", "0.73584396", "0.7329822", "0.72882223", "0.7255437", "0.7153491", "0.70499015", "0.7021705", "0.6966801", "0.6941674", "0.6791313", "0.677205", "0.6720164", "0.67138517", "0.67052054", "0.67052054", "0.67052054", "0.67052054", "0.67052054", "0.668138", "0.66606927", "0.66606927", "0.66606927", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214", "0.6644214" ]
0.0
-1
Get the server's local IP address so it can be accessed inside this network.
def print_local_ip():\n    spacer = '-' * 50\n    local_ip = gethostbyname(gethostname())\n    print('\n{}\nLocal IP address is: {}\n{}'.format(spacer, local_ip, spacer))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def localip(self) :\n\t\ttry :\n\t\t\treturn self._localip\n\t\texcept Exception as e:\n\t\t\traise e", "def get_local_host_ip(self) -> str:", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n local_ip = sock.getsockname()[0]\n sock.close()\n\n return local_ip", "def get_local_ip(self, system):\n if system == \"Linux\":\n # This is a bit ugly but it works\n ips = check_output(['hostname', '--all-ip-addresses']).decode(\"utf-8\")\n return ips.split(\" \")[0]\n else:\n return socket.gethostbyname(socket.gethostname())", "def local(self):\n return self.server.server_address", "def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"", "def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]", "def _get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n sock.connect(('10.255.255.255', 1))\n ip = sock.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n sock.close()\n\n return ip", "def get_local_ip(self):\n # Get the local IP address used to communicate with the GNS3\n # server. Not the GNS3 server's address, but rather the local\n # machine's address that we use to send messages to the GNS3\n # server. If that address isn't 127.0.0.1 (localhost), use it.\n server_local_ip = self.server.get_local_ip()\n if server_local_ip != '127.0.0.1':\n return server_local_ip\n else:\n # Otherwise, find the first interface on the first cloud node (if it exists)\n try:\n first_cloud_node = next(node for node in self.nodes() if node['node_type'] == 'cloud')\n interface = first_cloud_node['properties']['ports_mapping'][0]['interface']\n\n # If the interface is virtual, find and record its\n # mate's first IP address, which is the address we can\n # send to.\n\n ip_proc = subprocess.Popen(['ip', 'link', 'show', interface], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n first_field = ip_proc.stdout.read().decode().split()[1].split('@')\n if first_field[0] == interface:\n paired_interface = first_field[1].split(':')[0]\n return ni.ifaddresses(paired_interface)[ni.AF_INET][0]['addr']\n except (StopIteration, ValueError):\n # StopIteration if there are no cloud nodes\n # ValueError if there are no IP addresses on the paired interface\n pass\n\n return None", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n sock.connect(('8.8.8.8', 1))\n ip = sock.getsockname()[0]\n except:\n ip = '127.0.0.1'\n finally:\n sock.close()\n return ip", "def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())", "def get_local_ip():\n try:\n ip_task = os.popen(\"ifconfig | grep -Eo 'inet (addr:)?(Adresse:)?([0-9]*\\.){3}[0-9]*' | grep -Eo '([0-9]*\\.){3}[0-9]*' | grep -v '127.0.0.1'\")\n local_ip = ip_task.read().strip()\n ip_task.close()\n if '\\n' in local_ip:\n local_ip = local_ip.split('\\n')[0]\n print ' >> got local ip:', local_ip\n return local_ip\n except:\n return '0.0.0.0'", "def get_local_ip():\n\n return os.environ[LOCAL_IP_KEY]", "def server_ip(self) -> str:\n return pulumi.get(self, \"server_ip\")", "def 
get_localhost_ip():\n try:\n return [\n (s.connect((NAME_SERVER, 80)), s.getsockname()[0], s.close())\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]\n ][0][1]\n except Exception:\n return '127.0.0.1'", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def get_ip_address():\n\n # Windows\n if _IS_WINDOWS:\n local_ip = socket.gethostbyname(socket.gethostname())\n else:\n # Linux and MacOS\n local_ip = None\n try:\n # First way, tested in Ubuntu and MacOS\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n local_ip = s.getsockname()[0]\n s.close()\n except:\n # Second way, tested in CentOS\n try:\n local_ip = socket.gethostbyname(socket.gethostname())\n except:\n pass\n\n if local_ip == None or local_ip == '127.0.0.1' or local_ip == '127.0.1.1':\n logger.warning(\n 'get_ip_address failed, please set ip address manually.')\n return None\n\n return local_ip", "def server_ip(self):\n return self._server_ip", "def get_self_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()\n s.close()\n return ip[0]", "def localhost_IP(self):\r\n return self._localhost_ip", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def get_server_ip(srv):\n pass", "def get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n ip = s.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n s.close()\n return ip", "def local_ip(self) -> Optional[str]:\n if not self._send_parse_reply(b\"AT+IPADDR\", b\"+IPADDR:\"):\n return None\n return self._buf", "def _get_my_ip():\n try:\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n csock.connect(('8.8.8.8', 80))\n (addr, port) = csock.getsockname()\n csock.close()\n return addr\n except socket.error:\n return \"127.0.0.1\"", "def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'", "def get_ip():\n with hide(\"everything\"):\n ip_addresses = run('hostname -I').split(' ')\n return ip_addresses[0]", "def get_host_ip_addr():\n return nova_conf.my_ip", "def getPublicIp():\n global PUBLIC_IP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n PUBLIC_IP = s.getsockname()[0]\n except Exception:\n PUBLIC_IP = '127.0.0.1'\n finally:\n s.close()\n return PUBLIC_IP", "def getPublicIP():\n try:\n # Try to get the internet-facing IP by attempting a connection\n # to a non-existent server and reading what IP was used.\n with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:\n # 203.0.113.0/24 is reserved as TEST-NET-3 by RFC 5737, so\n # there is guaranteed to be no one listening on the other\n # end (and we won't accidentally DOS anyone).\n sock.connect(('203.0.113.1', 1))\n ip = sock.getsockname()[0]\n return ip\n except:\n # Something went terribly wrong. 
Just give loopback rather\n # than killing everything, because this is often called just\n # to provide a default argument\n return '127.0.0.1'", "def local_ip():\n sys_name = system()\n if sys_name == 'Darwin':\n # OSX\n route = Command('route')\n ifconfig = Command('ifconfig')\n\n iface = [\n line.strip()\n for line in route('-n', 'get', 'default')\n if line.strip().startswith('interface')\n ][0].split(':')[1].strip()\n return [\n line.strip()\n for line in ifconfig(iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1]\n elif sys_name == 'Linux':\n try:\n ip = Command('ip')\n iface = [\n line.strip()\n for line in ip('route')\n if line.strip().startswith('default ')\n ][0].split(' ')[4]\n except CommandNotFound:\n route = Command('route')\n iface = [\n line.strip()\n for line in route('-n')\n if line.startswith('0.0.0.0')\n ][0].split(' ').pop()\n\n try:\n # try with IP\n ip = Command('ip')\n return [\n line.strip()\n for line in ip('addr', 'show', iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1].split('/')[0]\n except CommandNotFound:\n pass\n\n # fallback to ifconfig\n ifconfig = Command('ifconfig')\n return [\n line.strip()\n for line in ifconfig(iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1]\n\n return None", "def detect_ip_address():\n # Rather hackish way to get the local ip-address, recipy from\n # https://stackoverflow.com/a/166589\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip_address = s.getsockname()[0]\n s.close()\n return ip_address", "def get_remote_addr(self):\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('SELECT remote_addr FROM sessions WHERE id = ?;', \\\n (self.sid,))\n remote_addr = cursor.fetchone()\n cursor.close()\n connection.close()\n return remote_addr[0]", "def get_ip_address(self):\n return self.adb.get_ip_address()", "def get_public_ip():\n public_ip = get('https://api.ipify.org').text\n return public_ip", "def get_my_ip_address(remote_server=\"google.com\"):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: \n s.connect((remote_server, 80))\n return s.getsockname()[0]", "def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip", "def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip", "def address_local(self):\n if self.local_ip is None or self.port is None:\n return None\n return URL_API.format(ip=self.local_ip, port=self.port)", "def get_ip():\n return os.getenv(\"HOST_IP\", \"127.0.0.1\")", "def get_externalip(self):\n\n myip = \"\"\n for i in range(5):\n myip = self.fetch(random.choice(self.server_list))\n if myip != \"\":\n return myip\n else:\n continue\n return \"\"", "def local_address(self) -> T_SockAddr:\n from anyio._core._sockets import convert_ipv6_sockaddr\n return convert_ipv6_sockaddr(self.raw_socket.getsockname())", "def getMyIP():\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect(('8.8.8.8', 1)) # connect() for UDP doesn't send packets\r\n return s.getsockname()[0]", "def get_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n return s.getsockname()[0]\n except:\n return '127.0.0.1'\n finally:\n s.close()", "def get_public_ip(self):\n return self.public_ip", "def address(self):\n \n return self.__ip", "def 
get_ip(self):", "def get_remote_ip(request):\n \n return utilities.get_remote_ip(request)", "def remoteip(self) :\n\t\ttry :\n\t\t\treturn self._remoteip\n\t\texcept Exception as e:\n\t\t\traise e", "def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")", "def find_local_host_ipv4(server_to_test_against):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((server_to_test_against, 80))\n local_ipv4 = (s.getsockname()[0])\n s.close()\n return local_ipv4", "def get_ip_address(self):\n return self.__ip_address", "def get_default_server_ip(cls):\n \n _position = cls.basic_parameters[1]\n \n return _position['server_ip']", "def ip(self):\n return os.environ.get('REMOTE_ADDR')", "def get_ip() -> str:\n for ip in socket.gethostbyname_ex(socket.gethostname())[2]:\n if not ip.startswith(\"127.\"):\n return ip\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]:\n s.connect((\"8.8.8.8\", 53))\n ip, port = s.getsockname()\n s.close()\n if not ip.startswith(\"127.\"):\n return ip\n raise ConnectionError(\"Can not get a suitable IP\")", "def publicIP(self):\n return self.query('https://plex.tv/:/ip')", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def get_IPaddress():\n config = get_ifconfig()\n return config[0]", "def external_IP(self):\r\n return self._external_ip", "def public_ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"public_ip_address\")", "def ip_address(self) -> str:\n return pulumi.get(self, \"ip_address\")", "def get_ip_address(self):\n raise NotImplementedError", "def ip_address(self) -> str:\n return self._device.ip if self.is_connected else None", "def get_ip():\n return request.environ['HTTP_REMOTE_ADDR']", "def ip(self):\n if not self._ip:\n if 'ip' in self.config:\n ip = self.config['ip']\n else:\n ip = self.protocol.transport.get_extra_info('sockname')[0]\n ip = ip_address(ip)\n if ip.version == 4:\n self._ip = ip\n else: # pragma: no cover\n response = urlopen('http://ipv4.icanhazip.com/')\n ip = response.read().strip().decode()\n ip = ip_address(ip)\n self._ip = ip\n return self._ip", "def remote_addr(env):\r\n # In production the remote address is always the load balancer\r\n # So check X-Forwarded-For first\r\n # E.g. 
HTTP_X_FORWARDED_FOR: '66.249.72.73, 75.101.144.164'\r\n if env.has_key('HTTP_X_FORWARDED_FOR'):\r\n ips = re.split(r'\\s*,\\s*', env['HTTP_X_FORWARDED_FOR'])\r\n if len(ips) > 0:\r\n return ips[0]\r\n\r\n return env['REMOTE_ADDR']", "def remote_addr(self):\n return self._environ.get('REMOTE_ADDR', '0.0.0.0')", "def remote_addr(self):\r\n return self._environ.get('REMOTE_ADDR', '0.0.0.0')", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def ip_addr(self):\n return self.ip_addresses[0]", "def sitepublicip(self) :\n\t\ttry :\n\t\t\treturn self._sitepublicip\n\t\texcept Exception as e:\n\t\t\traise e", "def masterIP(self):\r\n return self._masterIP", "def get_ip():\n if not request.headers.getlist(\"X-Forwarded-For\"):\n return str(request.remote_addr)\n else:\n return str(request.headers.getlist(\"X-Forwarded-For\")[0])", "def get_node_ip(self):\n return ray.services.get_node_ip_address()", "def get_client_ip(self, request):\n xforward_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if xforward_for:\n return xforward_for.split(',')[0]\n return request.META.get('REMOTE_ADDR')", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def get_current_ip(self):\n response = get(ICANHAZIP, proxies={\"http\": self.local_http_proxy})\n\n if response.ok:\n return self._get_response_text(response)\n\n raise TorIpError(\"Failed to get the current Tor IP\")", "async def get_ip(self) -> Union[IPv4Address, IPv6Address]:\n xff = await self.get_x_forwarded_for()\n if xff: return xff[0]\n ip_addr = self._request.transport.get_extra_info('peername')[0]\n return ip_address(ip_addr)", "def get_addr(self):\n return Server.t_addresses.get(threading.get_ident())", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'", "def getIp(self):\n raise NotImplementedError", "def obtain_public_ip():\n from urllib2 import urlopen\n my_ip = urlopen('http://ip.42.pl/raw').read()\n logger.debug('The public ip is: %s' % my_ip)\n return str(my_ip)", "def remote_addr(self):\r\n route = self.remote_route\r\n return route[0] if route else None", "def get_ip(self, node_id):\n return self.get_ip_network()[node_id]", "def client_ip(self):\n return self._client_ip", "def ip_address(self):\n return self.address", "def get_host_ipaddress(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetHostIPAddress', self.handle)", "def get_ip(pc_name):\n pc_ip = '' \n try: \n pc_ip = socket.gethostbyname(pc_name) \n except Exception, e:\n initlog('failed to get PC ip; %s' % str(e)) \n return pc_ip", "def _get_ip():\n cmd_netstat = ['netstat', '-nr']\n p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)\n cmd_grep = ['grep', '^0\\.0\\.0\\.0']\n p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)\n cmd_awk = ['awk', '{ print $2 }']\n p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)\n galaxy_ip = p3.stdout.read()\n log.debug('Host IP determined to be %s', galaxy_ip)\n return galaxy_ip", "def get_host_ip(self, obj, host):\n\n server = self.nova(obj).server_get(host)\n return server.access_ipv4", "def getClientIP(self):\n if isinstance(self.client, IPv4Address):\n return 
self.client.host\n return None", "def server_address(self):\n return self._server_address", "def get_main_ipv4():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect( ('8.8.8.8', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv4 address: %s\" % e)\n return None", "def get_device_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n ip = sock.getsockname()[0]\n sock.close()\n return ip", "def get_global_ip():\n network_info_providers = [\n 'http://api.ipify.org/',\n 'http://myip.dnsomatic.com',\n 'http://inet-ip.info/ip',\n 'http://v4.ident.me/',\n ]\n random.shuffle(network_info_providers)\n for url in network_info_providers:\n try:\n return requests.get(url).text.lstrip().rstrip()\n except Exception:\n continue\n else:\n log.info('cannot find global ip')\n return \"\"", "def internal_IP(self):\r\n return self._internal_ip", "def _get_service_local_address(self, context, vpnservice):\n router_id = vpnservice['router_id']\n # check if this router already have an IP\n port = self._find_vpn_service_port(context, router_id)\n if not port:\n # create a new port, on the external network of the router\n # Note(asarfaty): using a unique device owner and device id to\n # make sure tis port will be ignored in certain queries\n ext_net = vpnservice['router']['gw_port']['network_id']\n port_data = {\n 'port': {\n 'network_id': ext_net,\n 'name': 'VPN local address port',\n 'admin_state_up': True,\n 'device_id': 'router-' + router_id,\n 'device_owner': ipsec_utils.VPN_PORT_OWNER,\n 'fixed_ips': constants.ATTR_NOT_SPECIFIED,\n 'mac_address': constants.ATTR_NOT_SPECIFIED,\n 'port_security_enabled': False,\n 'tenant_id': vpnservice['tenant_id']}}\n port = self.l3_plugin.base_create_port(context, port_data)\n # return the port ip as the local address\n return port['fixed_ips'][0]['ip_address']", "def get_client_ip(request):\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n i_p = x_forwarded_for.split(\",\")[0]\n else:\n i_p = request.META.get(\"REMOTE_ADDR\")\n return i_p", "def get_internal_host(self):\n prefer_internal_ip = self.charm_config.get(\"prefer-internal-ip\")\n fqdn = socket.getfqdn()\n ip = socket.gethostbyname(fqdn)\n if prefer_internal_ip:\n return ip\n return fqdn" ]
[ "0.8273781", "0.8249747", "0.8185857", "0.8137731", "0.80874467", "0.80454546", "0.80384547", "0.8009976", "0.7990402", "0.7969245", "0.7930696", "0.7782699", "0.77280563", "0.7713387", "0.7654462", "0.75452614", "0.75112414", "0.75089806", "0.7462935", "0.7451744", "0.7441398", "0.7409012", "0.73485583", "0.732427", "0.7306175", "0.7277429", "0.7253821", "0.72447777", "0.7210376", "0.7168964", "0.7132055", "0.7101454", "0.7090591", "0.7088883", "0.7086673", "0.70732915", "0.7057392", "0.70491177", "0.70466197", "0.7044538", "0.7024574", "0.7011496", "0.69971526", "0.6979833", "0.6975853", "0.69741994", "0.69677216", "0.69454384", "0.6939042", "0.6934068", "0.6925815", "0.6913133", "0.69117206", "0.6887703", "0.68787575", "0.6876714", "0.6846522", "0.68443763", "0.68422264", "0.6842112", "0.68396074", "0.68378323", "0.68264365", "0.6816208", "0.67871547", "0.6770892", "0.67696095", "0.6769252", "0.6768514", "0.67666924", "0.67666507", "0.67653483", "0.67614615", "0.67533696", "0.67401326", "0.6737846", "0.6735847", "0.6724474", "0.67217", "0.6718513", "0.6712594", "0.6710707", "0.6704856", "0.67047673", "0.6699316", "0.66949725", "0.667307", "0.66699797", "0.6666051", "0.6657737", "0.6657288", "0.6654147", "0.6647332", "0.6637085", "0.6630937", "0.66296124", "0.6626659", "0.66228175", "0.662207", "0.6619849" ]
0.7378908
22
Amount by which x must be increased until it is evenly divisible by 64
def padlen_64(x: int):\n    return (64 - (x % 64)) % 64
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modReduce(self, x):\n\n assert 0 <= x < pow(self.mod, 2), 'out of range.'\n q = (x * self.u) >> (2 * self.M_bit)\n r = x - q * self.mod\n while r >= self.mod:\n r -= self.mod\n return r", "def taille(x):\n n = 0\n \n while (2**n) -1 < x :\n n+=1\n \n return(n)", "def foo_4(x):\n\tresult=1\n\tfor i in range(1, x+1):\n\t\tresult=result * i\n\treturn result", "def bulk_modulus():\n\n return 10000.0", "def mod_5(x):\r\n return x%5", "def pseudo(x,N) :\n\treturn (x**2+1)%N", "def mod_5(x):\n return x % 5", "def mod_5(x):\n return x % 5", "def enlarge(n):\n\n return n* 100", "def enlarge(n):\n return n*100", "def enlarge(n):\r\n return n * 100", "def num (x):\n\n if not x:\n return None\n seed = 1\n scale = Fraction(1,1)\n lone = None\n num = Fraction(0,1)\n while not x == seed:\n\n if not lone and le(_abs(sub(x,seed)),pos):\n lone = True\n if le(seed,x):\n seed = seed*2+1\n num += scale\n lone = lone or le(x,seed)\n else:\n seed = seed*2\n num -= scale\n lone = lone or le(seed,x)\n if lone:\n scale *= Fraction(1,2)\n return num", "def generator(factor: int, test: typing.Callable[[int], bool],\n start: int) -> typing.Iterator[int]:\n value = start\n while True:\n value = (value * factor) % 2147483647\n if test(value):\n yield value", "def Sum_Numbers_x_Power_Digits(x):\n totalSum = 0 \n for i in xrange(10, 999999):\n if i == sum([int(j)**x for j in str(i)]):\n totalSum += i\n return totalSum", "def five():\r\n \r\n n = 20\r\n divisible = False\r\n \r\n while divisible == False:\r\n n += 20\r\n divisible = True\r\n for i in range(20, 0, -1):\r\n if n % i != 0:\r\n divisible = False\r\n break\r\n return n", "def next_power_2(x: int) -> int:\n return 0 if x < 1 else shift_left_bit_length(x)", "def question_29(x: int) -> int:\n # Base case below:\n if x == 0:\n return 1\n # Recursive function below:\n else:\n result = 1\n while x > 0:\n for i in range(x):\n result *= question_29(i)\n x -= 1\n return result * 2 * 1", "def question_30(x: int) -> int:\n # Base case below:\n if x == 0:\n return 1\n # Recursive function below:\n else:\n result = 1\n while x > 0:\n for i in range(x):\n result *= 2 ** question_30(i)\n x -= 1\n return result", "def factorial(x):\r\n res = 1\r\n for i in range (1, x+1)\r\n res *= i\r\n return res", "def multiply_by_4(x):\n\treturn int(x) * 4", "def perfect_hash(num):\n return ((num+OFFSET)*(SIZE/PERIOD)) % (SIZE+1) + 1", "def _next_power_of_2(x):\n return 1 if x == 0 else 2**(x - 1).bit_length()", "def factorial(x):\r\n output = 1\r\n for factor in range(2,x+1):\r\n output = output * factor\r\n return output", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def my_func(x, y):\n result = 0\n pow_res = 1\n while y:\n pow_res= pow_res*x\n y +=1\n\n result = 1 / pow_res\n\n\n return result", "def r1(x, n, max_size=32):\n return (x << n) % (2 << (max_size - 1)) + (x >> (max_size - n))", "def modulus_raknare(steps):\n i = 0\n\n def next_step():\n nonlocal i\n i = (i + 1) % steps\n return i\n return next_step", "def nextpow2(x):\n log2_n = math.ceil(math.log2(x))\n n = 2 ** log2_n\n return n", "def prime_counting_function_inv(y):\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x", "def one():\r\n \r\n i = 1\r\n sum = 0\r\n while i < 1000:\r\n if i % 3 == 0 or i % 5 == 0:\r\n sum = sum + i\r\n i = i + 1\r\n else:\r\n i = i + 1\r\n return sum", "def myExp(base,exponent,modulus):\n result = 1\n while exponent > 0:\n if exponent & 1 == 1:\n result = (result * base) % modulus\n exponent = exponent >> 1\n base = (base * base) % modulus\n 
return result", "def rng(x):\n\n\tm = 2**31 - 1\n\ta = 48271\n\tc = 0\n\treturn (a*x + c)%m", "def obtain_factorial(x):\n product = 1\n for ii in list(range(x)):\n product = product * (ii + 1)\n\n return(product)", "def generator(factor, current, condition=0):\n while True:\n next_current = factor * current % 2147483647\n if condition == 0 or (next_current % condition == 0):\n yield next_current\n current = next_current", "def add_x(self, x, add):\n return (x + add) % self.x_len", "def get_min_run(n):\n r = 0\n while n >= 64:\n r |= n & 1\n n >>= 1\n return n + r", "def incrment_1(x):\n return(x + 1)", "def num_permut(x) -> tuple:\n h = x // 100\n d = (x % 100) // 10\n n = x % 10\n return(n * 100 + d * 10 + h)", "def pow2(x: int, p: int) -> int:\n while p > 0:\n x = x * x % q\n p -= 1\n return x", "def mod_pow(x,e,p):\n x = x % p\n R = 1\n while e > 0 :\n if (e%2) == 1 :\n R = (R*x) % p\n e = e//2\n x = (x*x) % p \n return(R)", "def addmod(x, y, mod=2 ** 32):\n r = x + int(y)\n return r if r < mod else r - mod", "def square_and_multiply(x, exponent, n):\n result = 1\n while exponent > 0:\n if exponent % 2:\n result = (result * x) % n\n x = (x * x) % n\n exponent = exponent // 2\n return result", "def hash(x):\r\n return (randint(1,5*c)*x + randint(1,5*c))%c", "def evansMod(x,n):\n if x%n == 0:\n return 1\n else:\n return 0", "def unit_step(x):\n if x < 0:\n return 0\n\n return 1", "def factorial(x):\n value = 1\n for i in range(2, add(x, 1)):\n value = multiply(value, i)\n return value", "def calculate(x: int) -> int:\n\n digits = list(map(int, list(str(x))))\n return sum(list(map(lambda a: a**2, digits)))", "def player_increment(prev_player: int) -> int:\n return (prev_player + 1) % 5", "def r_soft_hash(x):\n if abs(x) < 1e-9:return 0\n # round it to some number of bits\n b = ns.round(ns.log(abs(x)) / ns.log(2))\n gran = 2**(b-30)\n return ns.round(x / gran) * gran", "def blum_blum_shub(modulus_length=512):\n modulus = make_modulus(num_bits=modulus_length)\n\n def f(inputInt):\n return pow(inputInt, 2, modulus)\n\n return f", "def improve_power(x):\r\n for i in range(2,base(x)//2+1):\r\n if(base(x)%i==0):\r\n temp=base(x)\r\n n=0\r\n flag=True\r\n while(temp>1):\r\n if(temp%i!=0):\r\n flag=False\r\n break\r\n else:\r\n temp=temp/i\r\n n=n+1\r\n if (flag):\r\n return(make_power(i,n*power(x)))\r\n return (make_power(x(0), x(1)))", "def next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n return i", "def divisor_num(x):\n factor_pow = map(lambda y: y + 1, factorint(x).values())\n div_num = reduce(mul, factor_pow)\n return div_num", "def Incrpower(self, increment):\n self.power += increment", "def foo_6(x): ## calculate the factorial of x in a different way\n\tfacto=1\n\twhile x>=1:\n\t\tfacto=facto*x\n\t\tx=x-1\n\treturn facto", "def value(x):\r\n val = 0\r\n ace_count = 0;\r\n for i in range(len(x)):\r\n if x[i] == 1:\r\n ace_count += 1\r\n val += 11\r\n else:\r\n val += x[i]\r\n while val > 21 and ace_count != 0:\r\n val -= 10\r\n ace_count -= 1\r\n return val", "def add(self, x):\n self.sum += (1 / self.counter) * (x - self.sum)\n self.counter += 1", "def how_many_100s(self, amount):\n return amount // 100", "def roll(entropy, n):\n\n # Minimum bit depth to cover the full range.\n # Note that more bits would be more fair.\n bit_depth = math.ceil(math.log2(n))\n\n x = entropy(bit_depth)\n\n # Scale from total range to desired range.\n # Numbers with higher odds will be evenly distributed.\n return math.floor(x * n / 2 ** bit_depth)", "def mod_power(x, a, m):\n r = 1\n x = x % m\n while a 
> 0:\n if a & 1:\n r = (r * x) % m\n a >>= 1\n x = (x * x) % m\n return r", "def f(z):\n if abs(z) > 2:\n return 1\n else:\n n = 1\n while abs(z) < 2:\n n += 1\n if n > 100:\n return 0\n else:\n z = z**2 + c\n return n", "def div2(x):\n if odd(x):\n x += n\n return x // 2 % n", "def rotl(x, count):\n ret = 0\n for i in range(64):\n bit = (x >> i) & 1\n ret |= bit << ((i + count) % 64)\n return ret", "def square(original_number):\n running_total = 0\n for counter in range(original_number):\n running_total = running_total + original_number\n return running_total", "def how_many_50s(self, amount):\n return amount // 50", "def sol(n):\n p = 1\n res = 0\n \n while n:\n p*=5\n if n&1:\n res+=p\n n=n>>1\n return res%1000000007", "def update(self, idx, x):\n while idx < len(self.bit):\n self.bit[idx] += x\n idx |= idx + 1", "def calc(self):\n num = 22\n while not self.divisible(num):\n # we know that only even numbers are divisible by 2, so\n # we only inspect even numbers.\n num = num + 2\n if num % 10000:\n print(str(num), end='\\r')\n\n return num", "def modulo(x, y) :\n if (x / y) < 1:\n return x\n else:\n return modulo(x - y, y)", "def _enc(x: int) -> float:\n return 2 + x + (29 / (x ** 2 + (1 - x) ** 2))", "def power(x, m, n):\n a = 1\n while m > 0:\n if m % 2 == 1:\n a=(a*x)%n\n x=(x*x)%n\n m//=2\n return a", "def step(indiv):\n\tsoma=0\n\ttamanho = len(indiv)\n\tfor i in range(len(indiv)):\n\t\tsoma += math.floor(indiv[i])\n\treturn 6*tamanho +soma", "def self_powers():\n return sum([i ** i for i in range(1, 1001)]) % (10 ** 10)", "def overall_reduction(self):\n return 84", "def hash_function(self, x):\n if not x:\n return -1\n hashed_value = 0\n\n for char in x:\n hashed_value = 181 * hashed_value + ord(char)\n\n return hashed_value % self.capacity", "def constrain(value):\n size = 2**m\n return (value%size)", "def nextpow2(x):\n return int(numpy.ceil(numpy.log2(numpy.abs(x))))", "def modulo_power(x, b):\n \n r = x % b\n ct = 0\n pows = {}\n while r not in pows:\n pows[r] = ct\n ct += 1\n r = x * r % b\n return ct - pows[r]", "def bintogray(x: int) -> int:\n assert x >= 0\n return x ^ (x >> 1)", "def _find_nearest_power_of_two(x):\n\n return 1 << (x - 1).bit_length()", "def roundMultiple(x, base=4):\n return int(base * round(float(x)/base))", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def nextpow2(i):\n n = 1\n while n < i:\n n *= 2\n return n", "def find_minrun(n: int) -> int:\n r = 0 # Becomes 1 if any bits are shifted off\n assert n >= 0\n while n >= 64:\n # The target of this while-loop:\n # If n is an exact power of 2, return 32;\n # otherwise, return int k in [32,64] such that n/k is close to, but strictly \n # less than, an exact power of 2 that is larger than 2^1=2.\n \n # | is `OR by bits`, & is `AND by bits`. ie r = r|(n&1).\n # The next two lines of code work as follows:\n # 1. If n is an exact power of 2, then for all loops, n&1=0, r=r|0=0|0=0, \n # and n is halved, until n=64 and is halved to 32, with r=0, so returns 32.\n # 2. Otherwise, then there must be at least one `1` among the second to the \n # last digits of n's binary form, eg.10010000. We scan from the rightmost digit # to the left, and whenever a 1 is met, r is 1. n will decrease to the n//2^k \n # that is closest to but less than 64. 
The target is met.\n #\n # In essence, this procedure is simply taking the first 6 bits of n, and add \n # 1 if any of the remaining bits is 1 (we call a bit that is 1 a \"set bit\").\n\n r |= n & 1\n n >>= 1 # move n's binary form all 1 digit to the right, ie n = n // 2\n # If n < 64, just return n, since it is too small to bother with fancy stuff\n return n + r", "def fn(n, x, r):\n if n == 0: return 1\n ans = 0\n for xx in range(6): \n if xx != x: ans += fn(n-1, xx, 1)\n elif xx == x and r < rollMax[x]: ans += fn(n-1, x, r+1)\n return ans", "def infinite_square_well_potential(x):\n return 0", "def f(n):\n\tfor i in range(101, n):\n\t\tif (i % 21 == 0):\n\t\t\treturn i", "def twenty():\r\n \r\n n = 100\r\n factorial = 1\r\n sum = 0\r\n \r\n while n > 0:\r\n factorial *= n\r\n n -= 1\r\n \r\n for c in str(factorial):\r\n sum += int(c)\r\n \r\n return sum", "def make_modulus(num_bits=512):\n return find_good_prime(num_bits) * find_good_prime(num_bits)", "def iterative_fuel(d):\n accumulator = d\n total = 0\n while True:\n accumulator = math.floor(accumulator / 3) - 2\n if accumulator < 0:\n return total\n total += accumulator", "def fast_expo_iter(a, n, mod):\n ans = 1\n while n > 0:\n if n & 1: # CHECKS IF ODD OR NOT GIVES RETURNS 1 IF ODD\n ans = (ans * a) % mod\n a = (a * a) % mod\n n = int(n / 2)\n return ans", "def nextup(x, steps=[1, 2, 5, 10]):\n x_exp = fexp(x)\n x_man = fman(x)\n for i, s in enumerate(steps):\n sd = Decimal(str(s))\n if sd > x_man:\n break\n return float(s) * np.power(10.0, x_exp)", "def increment(x): # pylint: disable=invalid-name\n return x + 1", "def find_block(n: int) -> int:\n return(n // 55 + 1)", "def modExp(a, b, n):\n c = 0\n d = 1\n for bi in bin(b)[2:]:\n c = 2 * c\n d = (d * d) % n\n if bi == '1':\n c += 1\n d = (d * a) % n\n return d", "def solution(n: int = 2000000) -> int:\n\n return sum(takewhile(lambda x: x < n, prime_generator()))", "def how_many_20s(self, amount):\n return amount // 20", "def sw(n):\n return 4*n*n + 2*n + 1", "def findSpecialFactor(divisor):\n for i in range(1, 1000):\n prod = i * factor\n if prod % findMod(prod) == i:\n return i", "def Z(n):\n count5 = 0\n i = 1\n while 1:\n a = pow(5, i)\n if a > n:\n return count5\n else:\n count5 += n/a\n i += 1" ]
[ "0.64682055", "0.6353813", "0.63322943", "0.6308356", "0.6298304", "0.62941134", "0.61767954", "0.61767954", "0.61669123", "0.61343306", "0.61135375", "0.60509366", "0.59431607", "0.5926209", "0.5924908", "0.5908529", "0.5902806", "0.5896249", "0.58734715", "0.5857224", "0.5839226", "0.58385986", "0.5828709", "0.58237207", "0.5820636", "0.5814938", "0.58137983", "0.5798882", "0.57957083", "0.5790856", "0.5780146", "0.5767887", "0.5767283", "0.57611823", "0.574455", "0.5740237", "0.57330924", "0.5719129", "0.5718365", "0.5715715", "0.5713456", "0.57124394", "0.5711526", "0.57063323", "0.57040715", "0.5688624", "0.56856054", "0.568167", "0.56703806", "0.5669941", "0.5665767", "0.56480813", "0.56421524", "0.563793", "0.56357396", "0.56344897", "0.5634303", "0.56325537", "0.56323314", "0.5631536", "0.56237936", "0.5622187", "0.56216824", "0.5620773", "0.56170315", "0.5616064", "0.5608793", "0.5607957", "0.5603743", "0.5601377", "0.5597418", "0.55828166", "0.5580968", "0.5571328", "0.5567172", "0.5556735", "0.55528444", "0.5547765", "0.554202", "0.5539329", "0.5539101", "0.5535848", "0.55306727", "0.55267847", "0.5525768", "0.5521439", "0.55146056", "0.5513852", "0.55137426", "0.5513728", "0.55112076", "0.55084753", "0.5505091", "0.5497314", "0.54961663", "0.54928917", "0.54927564", "0.54880935", "0.5476802", "0.5467286" ]
0.6562799
0
Simulate an SHA256 ending pad (1 bit, zero pad, 64-bit length)
def end_shastream(length: int):\n    padlen = padlen_64(length + 1 + 8)\n    return bytes([0x80]) + bytes(padlen) + int.to_bytes(length * 8, 8, 'big')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SHA256(self) -> _n_0_t_3[_n_0_t_9]:", "def _sha256(sha256):\n if not sha256:\n sha256 = \"0\" * 64\n\n return sha256", "def shake256(data=None, digest_size=512):\n return SpongeHash(512, digest_size, data, \"SHAKE256\", KeccakSponge, PAD_SHAKE)", "def sha256(message: bytes):\n # convert message bitarray\n bit_msg = bitarray(endian='big')\n bit_msg.frombytes(message)\n L = len(bit_msg)\n\n # additions done mod 2^32\n pow2 = pow(2,32)\n\n # append 1 followed by K 0s where K is the minimum number >= 0 such that \n # len(bit_msg) + 1 + K + 64 is a multiple of 512\n bit_msg = bit_msg + bitarray('1') + (bitarray('0') * ((-L-65) % 512))\n # append len(bit_msg) as a 64-bit int to bit_msg\n bit_msg = bit_msg + util.int2ba(L, length=64, endian='big')\n\n # initialize hash to predefined values\n current_hash = [h for h in initial_hash]\n\n # operate on each 512-bit chunk\n for chunk_index in range(len(bit_msg)//512):\n chunk = bit_msg[chunk_index * 512 : (chunk_index+1) * 512]\n # w is array of 64 32-bit words with first 16 equal to chunk\n w = [chunk[i*32 : (i+1)*32] for i in range(16)]\n w.extend([bitarray(32) for _ in range(48)])\n # create last 48 words in w from first 16\n for i in range(16, 64):\n s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ rightshift(w[i-15], 3)\n s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ rightshift(w[i-2], 10)\n w[i] = int2ba32(sum(map(util.ba2int, [w[i-16], s0, w[i-7], s1])) % pow2)\n\n # copy current hash (stored in hex) into working list v as bitarrays\n v = list(map(int2ba32, current_hash))\n # compression\n for i in range(64):\n S1 = rightrotate(v[4], 6) ^ rightrotate(v[4], 11) ^ rightrotate(v[4], 25)\n ch = (v[4] & v[5]) ^ ((~v[4]) & v[6])\n temp1 = (constants[i] + sum(map(util.ba2int, [v[7], S1, ch, w[i]]))) % pow2\n S0 = rightrotate(v[0], 2) ^ rightrotate(v[0], 13) ^ rightrotate(v[0], 22)\n maj = (v[0] & v[1]) ^ (v[0] & v[2]) ^ (v[1] & v[2])\n temp2 = (util.ba2int(S0) + util.ba2int(maj)) % pow2\n\n # shift elements of v by 1\n for j in reversed(range(1, len(v))):\n v[j] = v[j-1]\n v[0] = int2ba32((temp1 + temp2) % pow2)\n v[4] = int2ba32((util.ba2int(v[4]) + temp1) % pow2)\n\n # add compressed values (which are bitarrays) to current_hash (which are ints)\n current_hash = list(map(lambda a,b: (a + util.ba2int(b)) % pow2, current_hash, v))\n\n # each entry of current_hash is a 32-bit integer so convert to 4 bytes \n # adding bytes appends them\n return b''.join(x.to_bytes(4, 'big') for x in current_hash)", "def default_sha256(key: KeyT, *args, **kwargs) -> bytes:\n return sha256(key).digest() # type: ignore", "def sha256(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha256\")", "def shake128(data=None, digest_size=256):\n return SpongeHash(256, digest_size, data, \"SHAKE128\", KeccakSponge, PAD_SHAKE)", "def test_right_pad(pad_contract):\n\n address_bytes = get_address_as_bytes(pad_contract.call().rightPad())\n hash = bitcoin.bin_sha256(address_bytes)\n val = pad_contract.call().getHashRightPad()\n val = force_bytes(val)\n assert hash == val", "def test_left_pad(pad_contract):\n\n address_bytes = get_address_as_bytes(pad_contract.call().leftPad())\n hash = bitcoin.bin_sha256(address_bytes)\n val = pad_contract.call().getHashLeftPad()\n val = force_bytes(val)\n assert hash == val", "def sha256_fdh(message: bytes, target_length=None, seed=0):\n if target_length is None:\n return sha256(message)\n if target_length < 32:\n raise ValueError(\"target length must be a value in bytes >= 32, the length of one 
SHA256 output\")\n cycles = target_length // 32\n # number of bytes needed to store largest cycle index to append to message\n max_num_bytes = int(math.log(cycles, 2)//8) + 1\n # concatenate hashes together\n output = b''.join(sha256(message + (c + seed).to_bytes(max_num_bytes, 'big')) for c in range(cycles))\n # append 0s to output until it reaches target_length\n if target_length > len(output):\n return output + (0).to_bytes(target_length - len(output), 'big')\n return output", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def pad(s):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\treturn s + b\"\\0\" * (AES.block_size - len(s) % AES.block_size)", "def blake2_256(data):\n return blake2b(data, digest_size=32).digest()", "def sha256(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)\n d.update(data)\n return d.digest()", "def SHA256(data: str) -> str:\n\n msg = []\n for e in data:\n # convert chars to binary in 8-bit \n chbin = prepad(binary(ASCII[e]), to=8)\n msg.extend(chbin)\n \n # get length (in bits) of input\n datalen = binary(len(msg))\n if len(datalen) > 64:\n raise ValueError(\"input is too large\")\n\n # pad out message to factor of 512 (512-bit blocks)\n tail = [0]*(64-len(datalen)) + datalen\n padding = [1] + [0]*(511-((len(msg)+len(tail)) % 512))\n msg += (padding + tail)\n \n ctx = None\n for i in range(0, len(msg), 512):\n block = msg[i:i+512] \n wds = [UBitArray32(block[i:i+32]) for i in range(0, 512, 32)]\n wds = schedule(wds)\n # set context for next block\n ctx = compress(wds, ctx)\n\n hexdigest = \"\".join(x.tohex() for x in ctx)\n return hexdigest", "def padlen_64(x: int):\n return (64 - (x % 64)) % 64", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def hash_128_bit_pass(passwd):\n h = hashlib.sha256()\n h.update(passwd)\n return h.hexdigest()[:16]", "def sha256(value):\n return hashlib.sha256(value).hexdigest()", "def padding_encryption():\n return padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )", "def pad(s):\n return s + (16 - len(s) % 16) * chr(16 - len(s) % 16)", "def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()", "def testCheckSha256Signature_FailBadOutputLength(self):\n sig_data = 'fake-signature'.ljust(256)\n signed_hash = 'fake-hash' # Malformed (not 32 bytes in length).\n self.DoCheckSha256SignatureTest(False, True, sig_data,\n common.SIG_ASN1_HEADER, signed_hash,\n signed_hash)", "def _hmac_sha256(key, msg):\n\n return hmac.new(key, msg, hashlib.sha256).digest()", "def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"", "def hash64bits(*args):\n # 64 bits hexdigest\n h = hashlib.sha1(bytes(repr(args), \"utf-8\")).hexdigest()[:16]\n # Convert to an integer and return\n return int(h, 16)", "def f(data=None):\n\n hsh = SHA512.new()\n hsh.update(b\"1\")\n hsh.update(data)\n return hsh", "def sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()", "def sha3_256(data=None):\n return SpongeHash(512, 256, data, \"SHA3-256\", KeccakSponge, PAD_SHA3)", "def h0(data_1=None, data_2=None):\n\n hsh = SHA512.new()\n hsh.update(b\"3\")\n hsh.update(data_1)\n hsh.update(data_2)\n return hsh", "def checksum(**kwargs):\n\n # remove secretkey from kwargs, lookup if missing\n secretkey = kwargs.pop('secretkey', resolve_secretkey())\n\n # sort the args, and concatenate them\n param_string = ''.join([''.join([str(x), str(y)])\n for x, y in sorted(kwargs.items())])\n\n return 
b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def hash_bytes(\r\n k: bytes,\r\n e: bytes,\r\n) -> bytes:\r\n return hmac.new(k, e, hashlib.sha256).digest()", "def slophash(val):\n\n if not val:\n return None\n else:\n return sha256(val.encode('utf8')).hexdigest()[0:10]", "def _derive_padding_crypto(self, seed, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])", "def sha256(inputs, secret):\n m = hmac.new(secret, b'', hashlib.sha256)\n for i in inputs:\n m.update(i)\n return m.digest()", "def sha256(s: str) -> str:\n return hashlib.sha256(s.encode()).hexdigest()", "def padding(message):\n\n # Convert the string to bits by calling the tobits function\n mbits = tobits(message)\n # Get the length of bits\n length = len(mbits)\n # Calculate the strengthening vector length\n strengthmessage = (bin(int(length))[2:]).zfill(64 * ((len(bin(int(length))[2:]) + 63) // 64))\n\n # Create a padding which starts with 1\n padding = '1'\n # Get the number of zeroes to pad\n get_length = 128 - (length + 64) % 128\n # Run the for loop to append all 0's\n for i in range(0, get_length - 1):\n padding = padding + '0'\n\n # Make the entire pad \n to_add_pad = padding + strengthmessage\n # Return the entire pad\n return to_add_pad", "def sha256(ctx, salt=\"\"):\n if ctx.data:\n salted_input_value = salt + \":\" + ctx.data\n ctx.data = hashlib.sha256(salted_input_value.encode()).hexdigest()\n else:\n raise RefError(\n \"Ref error: eval_func: nothing to sha256 hash; try \" \"something like '|random:str|sha256'\"\n )", "def _sha_byte_len(self):\n if self is HashType.SHA1:\n return 20\n if self is HashType.SHA224:\n return 28\n if self is HashType.SHA256:\n return 32\n if self is HashType.SHA384:\n return 48\n if self is HashType.SHA512:\n return 64\n return 0", "def sha224(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha224\")", "def test_default_sha256_bytes(self):\n this_is_a_test = [\n 10244166640140130606,\n 5650905005272240665,\n 14215057275609328422,\n 5952353080197385534,\n 4990779931033217093,\n ]\n this_is_also = [\n 4140421647067018332,\n 9306548247555387104,\n 5672713771950536751,\n 8501641957786831066,\n 15146689942378126332,\n ]\n hashes = default_sha256(b\"this is a test\", 5)\n self.assertEqual(hashes, this_is_a_test)\n hashes = default_sha256(b\"this is also a test\", 5)\n self.assertEqual(hashes, this_is_also)", "def nullPad(s):\n padding = chr(0) * (Blowfish.block_size - (len(s) % Blowfish.block_size))\n if padding:\n return s + padding\n else:\n return s", "def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()", "def iv():\n return chr(0) * 16", "def badhash(x):\n x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF\n x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF\n x = ((x >> 16) ^ x) & 0xFFFFFFFF\n return x", "def g(data_1=None, data_2=None):\n\n hsh = SHA512.new()\n hsh.update(b\"2\")\n hsh.update(data_1)\n hsh.update(data_2)\n return hsh", "def Sha256(data: Union[bytes, str]) -> bytes:\n return hashlib.sha256(AlgoUtils.Encode(data)).digest()", "def get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += 
formatted_binary_number\n\n return result", "def getPaddedLength(string): \n if len(string) > 2<<64:\n return -1\n return struct.pack('<l', len(string))", "def __pad(self, data):\n return data + (AES.block_size - len(data) % AES.block_size) * \\\n chr(AES.block_size - len(data) % AES.block_size)", "def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)", "def get_sha256_hash(key, size=None):\n partition_hash = hashlib.sha256()\n for part in key:\n partition_hash.update(str(part).encode('utf-8'))\n sha256_hash = partition_hash.hexdigest()\n if not size or size > len(sha256_hash):\n size = len(sha256_hash)\n return sha256_hash[:size]", "def pad(data):\r\n bytes_to_pad = AES.block_size - len(data) % AES.block_size\r\n return data + (bytes_to_pad * chr(bytes_to_pad))", "def _produce_key(self, passphrase):\n from hashlib import sha256\n pp = bytes(passphrase, 'utf-8')\n hash_alg = sha256(pp)\n for i in range(self._get_key_stretches()):\n d = hash_alg.digest()\n hash_alg.update(d + pp)\n return hash_alg.digest()", "def checksum(payload):\n return hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]", "def _MakeEmsaMessageSha256(self, msg, modulus_size, logf=None):\r\n magic_sha256_header = [0x30, 0x31, 0x30, 0xd, 0x6, 0x9, 0x60, 0x86, 0x48,\r\n 0x1, 0x65, 0x3, 0x4, 0x2, 0x1, 0x5, 0x0, 0x4, 0x20]\r\n\r\n hash_of_msg = hashlib.sha256(msg).digest() #???\r\n\r\n self._Log(logf, 'sha256 digest of msg %s: [%s]' % (msg, hash_of_msg.encode('hex')))\r\n\r\n encoded = ''.join([chr(c) for c in magic_sha256_header]) + hash_of_msg\r\n\r\n msg_size_bits = modulus_size + 8-(modulus_size % 8) # Round up to next byte\r\n\r\n pad_string = chr(0xFF) * (msg_size_bits / 8 - len(encoded) - 3)\r\n return chr(0) + chr(1) + pad_string + chr(0) + encoded", "def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)", "def sha256(cls, value):\n assert type(value) is str\n return int(sha256(value.encode()).hexdigest(), 16)", "def generate_aes_key ( ) :\n import hashlib\n sr = Crypto.Random.random.StrongRandom( )\n key_bits = sr.getrandbits( 256 )\n sha_key = hashlib.sha256( str( key_bits ) ).digest( )\n return sha_key", "def blake2_128(data):\n return blake2b(data, digest_size=16).digest()", "def uint_test(input):\n\n s = Sha224()\n s.sha_update(input, len(input))\n return s.sha_digest()", "def test_filler():\n expected = (\n 'b77d99c935d3f32469844f7e09340a91ded147557bdd0456c369f7e449587c0f566'\n '6faab58040146db49024db88553729bce12b860391c29c1779f022ae48a9cb314ca'\n '35d73fc91addc92632bcf7ba6fd9f38e6fd30fabcedbd5407b6648073c38331ee7a'\n 'b0332f41f550c180e1601f8c25809ed75b3a1e78635a2ef1b828e92c9658e76e49f'\n '995d72cf9781eec0c838901d0bdde3ac21c13b4979ac9e738a1c4d0b9741d58e777'\n 'ad1aed01263ad1390d36a18a6b92f4f799dcf75edbb43b7515e8d72cb4f827a9af0'\n 'e7b9338d07b1a24e0305b5535f5b851b1144bad6238b9d9482b5ba6413f1aafac3c'\n 'dde5067966ed8b78f7c1c5f916a05f874d5f17a2b7d0ae75d66a5f1bb6ff932570d'\n 'c5a0cf3ce04eb5d26bc55c2057af1f8326e20a7d6f0ae644f09d00fac80de60f20a'\n 'ceee85be41a074d3e1dda017db79d0070b99f54736396f206ee3777abd4c00a4bb9'\n '5c871750409261e3b01e59a3793a9c20159aae4988c68397a1443be6370fd9614e4'\n '6108291e615691729faea58537209fa668a172d066d0efff9bc77c2bd34bd77870a'\n 'd79effd80140990e36731a0b72092f8d5bc8cd346762e93b2bf203d00264e4bc136'\n 'fc142de8f7b69154deb05854ea88e2d7506222c95ba1aab065c8a'\n )\n\n sp, v = sphinx_path_from_test_vector(\n 
'tests/vectors/onion-test-multi-frame.json'\n )\n filler = sp.get_filler()\n assert(2 * len(filler) == len(expected))\n assert(bytes.hex(bytes(filler)) == expected)", "def b64hash(s):\n _hash = hashlib.sha256()\n _hash.update(str2bytes(s))\n return bytes2str(b64encode(_hash.digest()))", "def make_secure_val(val):\n return \"%s|%s\" % (val, hmac.new(secret, val).hexdigest())", "def _derive_crypto(self, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n self.initiator_seed + self.responder_seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])", "def gen_secret() -> str:\n r = random.randrange(0, 255) # INSECURE, just for demo\n r = hex(r)[2:]\n if len(r) == 1:\n return f'0{r}'\n return r", "def hash_function(s):\n\n # O(n) over the key length\n # O(1) over the HASH_DATA_SIZE\n\n bytes_list = s.encode()\n\n total = 0\n\n\n for b in bytes_list: # O(n) over the length of the key\n total += b\n\n\n total &= 0xffffffff # 32 bit (8 f's)\n\n return total", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def sha_new(arg=None):\r\n\r\n crypto = sha()\r\n if arg:\r\n crypto.update(arg)\r\n\r\n return crypto", "def create_password_hash(self, password):\n return pbkdf2_sha256.encrypt(password, rounds=1000, salt_size=16)", "def padding(string):\r\n\tbinary = ascii_to_binary(string)\r\n\tl = len(binary)\r\n\tif l >= 448:\r\n\t\treturn \"STRING IS TOO LONG\"\r\n\telse:\r\n\t\tbinary += \"1\"\r\n\t\t\t\r\n\t\tfor i in range(448-len(binary)):\r\n\t\t\tbinary += \"0\"\r\n\r\n\t\tbinary = binary + conversions.decimal_to_binary(l, 64)\r\n\r\n\t\treturn binary", "def sha3_256(x):\n return hashlib.sha3_256(x).digest()", "def xxh128(data):\n storage_key1 = bytearray(xxhash.xxh64(data, seed=0).digest())\n storage_key1.reverse()\n\n storage_key2 = bytearray(xxhash.xxh64(data, seed=1).digest())\n storage_key2.reverse()\n\n return storage_key1 + storage_key2", "def left_zero_pad(s, blocksize):\n if blocksize > 0 and len(s) % blocksize:\n s = (blocksize - len(s) % blocksize) * b('\\000') + s\n return s", "def generate(length):\n return base64.encodestring(OpenSSL.rand.bytes(256))[:length]", "def _encode_2xbase64pad(data: str) -> str:\n pattern = r\"[^a-zA-Z0-9]\"\n regex = re.compile(pattern)\n while True:\n # First run\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n\n # Second run\n ebytes = base64.b64encode(estring.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n\n if not regex.findall(estring):\n break\n # Pad with trailing space and try again to eliminate base64 pad/special chars\n data = data + \" \"\n\n return estring", "def test_default_sha256(self):\n this_is_a_test = [\n 10244166640140130606,\n 5650905005272240665,\n 14215057275609328422,\n 5952353080197385534,\n 4990779931033217093,\n ]\n this_is_also = [\n 4140421647067018332,\n 9306548247555387104,\n 5672713771950536751,\n 8501641957786831066,\n 15146689942378126332,\n ]\n hashes = default_sha256(\"this is a test\", 5)\n self.assertEqual(hashes, this_is_a_test)\n hashes = default_sha256(\"this is also a test\", 5)\n self.assertEqual(hashes, this_is_also)", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result 
= result.hexdigest()\n return result", "def concatHash(h1: int, h2: int, len2: int, mod=10**11 + 7, base=1313131) -> int:\r\n return (h1 * pow(base, len2, mod) + h2) % mod", "def hash_password(password):\n\n return hashlib.sha224(password).hexdigest()[:20]", "def pad(msg):\n return msg + (BLOCK_SIZE - len(msg)) * PADDING", "def pad_base64_str(str):\n missing_padding = len(str) % 4\n if missing_padding != 0:\n str += '=' * (4 - missing_padding)\n return str", "def hotp(secret, count, digits=None):\n if not digits:\n digits = 6\n\n count_hex = '%x' % count\n\n count_hex = '0' * (16-len(count_hex)) + count_hex\n\n result = \"\"\n for i in xrange(0, 8):\n result += count_hex[i*2:i*2+2].decode('hex')\n\n hash = hmac.new(secret, result, digestmod=sha1).hexdigest()\n\n offset = int(hash[-1], 16)\n\n part = hash[(offset*2):(offset*2)+8]\n\n part_int = int(part, 16) & int(\"7fffffff\", 16)\n\n return part_int % 10**digits", "def sha256(content):\n content = content.encode('utf-8')\n return hashlib.sha256(content).hexdigest()", "def sha256_p(value):\n # check if the value has the expected type\n string_p(value)\n\n # SHA-256 hash has 64 hexadecimal characters\n if not re.fullmatch(r\"^[a-fA-F0-9]{64}$\", value):\n raise Invalid(\"the value '{value}' does not seem to be SHA256 hash\".format(value=value))", "def hash_password(password):\n password_md5 = hashlib.md5(password.encode('utf-8')).hexdigest()\n for i in range(0, len(password_md5), 2):\n if password_md5[i] == '0':\n password_md5 = password_md5[0:i] + 'c' + password_md5[i + 1:]\n return password_md5", "def test_create_with_defaults(self):\n digest = Digest.create()\n hashing_algorithm = HashingAlgorithmEnum.SHA_256\n digest_value = b''\n key_format_type = KeyFormatTypeEnum.RAW\n\n self._test_create(digest, hashing_algorithm, digest_value,\n key_format_type)", "def nice_hash(*args):\n h = sha1()\n for item in args:\n h.update(unicode(item))\n return b32encode(h.digest())", "def encode_length(value):\n if value == Length.INDEFINITE:\n return bytes([0b10000000])\n\n if value < 127:\n return bytes([value])\n\n output = []\n while value > 0:\n value, remainder = value // 256, value % 256\n output.insert(0, remainder)\n\n # prefix length information\n output = [0b10000000 | len(output)] + output\n return bytes(output)", "def __polynomial_hash(self, s, base = 31, max_size=168):\r\n digest = 0\r\n max_size = 168\r\n for c in s: digest = base * digest + ord(c)\r\n digest &= 2 ** max_size - 1 \r\n return hex(digest).rstrip('L')", "def mbrpad(s):\n length = len(s)\n if length == 32:\n return s\n elif length > 31:\n raise Exception(\"Constant too long\")\n s = bytearray(s + '\\x00' * (32 - len(s)))\n s[length] ^= 0x80\n s[-1] ^= 0x01\n return bytes(s)", "def sha256(self):\n return self.sha256checksums()", "def len_unpadded(self) -> int:", "def sha256_encode(text):\n _hash = hashlib.sha256\n if type(text) is str:\n return _hash(text.encode('utf8')).digest()\n elif type(text) is bytes:\n return _hash(text).digest()\n elif not text:\n # Generally for calls where the payload is empty. 
Eg: get calls\n # Fix for AttributeError: 'NoneType' object has no attribute 'encode'\n return _hash(\"\".encode('utf8')).digest()\n else:\n return _hash(str(text).encode('utf-8')).digest()", "def digest(self):\n d = MegaCrypto.str_to_a32(self.hash)\n return (d[0] ^ d[1], d[2] ^ d[3])", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass" ]
[ "0.7570556", "0.7199887", "0.6765323", "0.64624476", "0.6452416", "0.6417613", "0.63762766", "0.634967", "0.6248739", "0.62149316", "0.6214463", "0.61732584", "0.61626583", "0.61307865", "0.61083645", "0.6072602", "0.60649854", "0.6037192", "0.5975247", "0.5962065", "0.5933033", "0.5904168", "0.58952475", "0.58874416", "0.58807874", "0.5879751", "0.5866498", "0.58511806", "0.5839751", "0.5829769", "0.58288735", "0.5808021", "0.57830137", "0.5782321", "0.5779777", "0.57792723", "0.57709193", "0.57289237", "0.57185847", "0.57152736", "0.5714296", "0.5712326", "0.5699474", "0.5697414", "0.5696048", "0.569391", "0.5689225", "0.5688327", "0.56843853", "0.5677701", "0.5649657", "0.564807", "0.5641196", "0.5638822", "0.563206", "0.5626884", "0.5620803", "0.561159", "0.5604238", "0.55992794", "0.5598761", "0.55934674", "0.55934", "0.55922675", "0.5591511", "0.5590155", "0.5587955", "0.55810255", "0.55810255", "0.55810255", "0.5572471", "0.55626816", "0.556098", "0.55587316", "0.5558307", "0.5550294", "0.55357563", "0.55350655", "0.55346525", "0.5532031", "0.5532031", "0.5530383", "0.5523737", "0.55022806", "0.55001444", "0.54999334", "0.5496471", "0.5492343", "0.54774225", "0.54768765", "0.5457013", "0.54496294", "0.5447535", "0.54434353", "0.5440494", "0.5423683", "0.5416197", "0.5407677", "0.54042464", "0.54042464", "0.54042464" ]
0.0
-1
Returns None on success, otherwise an explanation string
def test(self, values: Dict[str, Any]) -> Optional[str]: # This is always True if self.cond == '#': return None def why(cond, field, explanation) -> Optional[str]: if cond: return None return '{}: {}'.format(field, explanation) # If it's missing, it's only True if it's a missing test. if self.field not in values: # Default to ignoring id field as long as no version. if self.field == '': return why('-' not in self.value, 'id', 'unknown version {}'.format(self.value)) return why(self.cond == '!', self.field, 'is missing') # If they supply a function, hand it to them. if callable(values[self.field]): return values[self.field](self) val = str(values[self.field]) if self.cond == '!': return why(False, self.field, 'is present') elif self.cond == '=': return why(val == self.value, self.field, '!= {}'.format(self.value)) elif self.cond == '/': return why(val != self.value, self.field, '= {}'.format(self.value)) elif self.cond == '^': return why(val.startswith(self.value), self.field, 'does not start with {}'.format(self.value)) elif self.cond == '$': return why(val.endswith(self.value), self.field, 'does not end with {}'.format(self.value)) elif self.cond == '~': return why(self.value in val, self.field, 'does not contain {}'.format(self.value)) elif self.cond == '<': try: actual_int = int(val) except ValueError: return why(False, self.field, "not an integer field") try: restriction_val = int(self.value) except ValueError: return why(False, self.field, "not a valid integer") return why(actual_int < restriction_val, self.field, ">= {}".format(restriction_val)) elif self.cond == '>': try: actual_int = int(val) except ValueError: return why(False, self.field, "not an integer field") try: restriction_val = int(self.value) except ValueError: return why(False, self.field, "not a valid integer") return why(actual_int > restriction_val, self.field, "<= {}".format(restriction_val)) elif self.cond == '{': return why(val < self.value, self.field, 'is the same or ordered after {}'.format(self.value)) elif self.cond == '}': return why(val > self.value, self.field, 'is the same or ordered before {}'.format(self.value)) else: # We checked this in init! assert False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def description():", "def describe(self):\n return ''", "def help(self):\n res = \"\"", "def _get_problem_str(self):\n return ''", "def summary_str(self):\n if not self.results:\n return self.summary.empty() or ''\n elif self.state == Ok:\n return self.summary.ok(self.results) or ''\n return self.summary.problem(self.results) or ''", "def _description(self):\n return None", "def test_print_result(capsys):\n assert \"\"\"Total 5 hands solved\nTotal 4 hands solved with hint\nTotal 4 hands failed to solve\"\"\" in hl.test_help_print_result(capsys)", "def Description(self) -> str:", "def Description(self) -> str:", "def describe(self) -> str:", "def shortDescription(self):\n return None", "def info():\n return r\"\"\"None\"\"\"", "def full_description(self):\n des = describe_dut(self.dut) if self.dut else ''\n if self.build:\n des += ' with ' + self.build\n if self.result_id:\n des += ' BVT result ID ' + str(self.result_id)\n return (self.description if self.description \n else 'unknown test') + ' on ' + des", "def summary_string(self) -> str:", "def shortDescription(self):\r\n doc = self._testMethodDoc\r\n return doc and doc.split(\"\\n\")[0].strip() or None", "def test_explained_text(self):\n result = self._do_output(o.ExplainedTextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \" * long text\\n\"\n \" * You can ignore this problem with --ignore mock_msg\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def help_description():\n pass", "def explain(self):", "def description(self):", "def remediation_description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"remediation_description\")", "def get_intro_message() -> str:\n return \"\"\"You are about to begin a new record.\nType the text sample you want to record.\nThis first sample MUST be typed by the real user (no impostor data).\"\"\"", "def help():\n return statement(help_text)", "def help(self) -> str:\n\t\treturn None", "def describe(result_code):\n return _MESSAGES.get(result_code) or 'unknown error'", "def lab9_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab7_q1():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab7_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def testSummaryDOCstr(self):\n pass", "def lab9_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab9_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab7_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab7_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def shortDescription(self):\n # Suppress default logging of docstrings.\n return None", "def lab9_q5():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def getReason():", "def summary(self):\n return ''", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def 
description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"description\")", "def _getDiagnosticString():\n text = '\\n## Diagnostic output from minimalmodbus ## \\n\\n'\n text += 'Minimalmodbus 
version: ' + __version__ + '\\n'\n text += 'Minimalmodbus status: ' + __status__ + '\\n'\n text += 'Revision: ' + __revision__ + '\\n'\n text += 'Revision date: ' + __date__ + '\\n'\n text += 'File name (with relative path): ' + __file__ + '\\n'\n text += 'Full file path: ' + os.path.abspath(__file__) + '\\n\\n'\n text += 'pySerial version: ' + serial.VERSION + '\\n'\n text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\\n\\n'\n text += 'Platform: ' + sys.platform + '\\n'\n text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\\n'\n text += 'Byteorder: ' + sys.byteorder + '\\n'\n text += 'Python version: ' + sys.version + '\\n'\n text += 'Python version info: ' + repr(sys.version_info) + '\\n'\n text += 'Python flags: ' + repr(sys.flags) + '\\n'\n text += 'Python argv: ' + repr(sys.argv) + '\\n'\n text += 'Python prefix: ' + repr(sys.prefix) + '\\n'\n text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\\n'\n text += 'Python executable: ' + repr(sys.executable) + '\\n'\n try:\n text += 'Long info: ' + repr(sys.long_info) + '\\n'\n except:\n text += 'Long info: (none)\\n' # For Python3 compatibility\n try:\n text += 'Float repr style: ' + repr(sys.float_repr_style) + '\\n\\n'\n except:\n text += 'Float repr style: (none) \\n\\n' # For Python 2.6 compatibility\n text += 'Variable __name__: ' + __name__ + '\\n'\n text += 'Current directory: ' + os.getcwd() + '\\n\\n'\n text += 'Python path: \\n'\n text += '\\n'.join(sys.path) + '\\n'\n text += '\\n## End of diagnostic output ## \\n'\n return text", "def short_description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"short_description\")", "def matcher_description(self):\n return None", "def get_description(self):", "def lab8_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab8_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab8_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def _default_output(result):\n return str(result) if result is not None else None", "def shortDescription(self):\n\n message = super(ForceBalanceTestCase,self).shortDescription()\n if message: return message\n else: return self.id()", "def question_11():\n return None", "def error_msg(self) -> str:\n # subclasses can override as needed\n return self.__doc__", "def shortDescription(self):\n if self._current_test_descr is not None:\n return self._current_test_descr\n return super(TestCase, self).shortDescription()", "def help(self):\n return None", "def get_error_message(self):\n msg = 'Test case: ' + self.benchmark + '.yaml + ' + self.producer + '.yaml failed. '\n info = ''\n if not self.directory:\n info = 'No results directory found. The benchmark probably failed'\n elif not self.reports:\n info = 'No results report generated. The results output format is probably wrong'\n elif not self.test_passed:\n info = 'Recorded messages percentage is lower than expected '\n return msg + info", "def question_7():\n return None", "def describe():", "def format_error (result):\n if check_ok (result):\n return 'exiftool finished probably properly. 
(\"%s\")' % strip_nl(result)\n else: \n if result is None:\n return \"exiftool operation can't be evaluated: No result given\"\n else:\n return 'exiftool finished with error: \"%s\"' % strip_nl(result)", "def question_4():\n return None", "def description(self):\n return (self.__doc__ or \"\").strip()", "def description(self):\n pass", "def description(self):\n pass", "def describe(self) -> Text:\n return self.__repr__()", "def short_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"short_description\")", "def remediation_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"remediation_description\")", "def remediation_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"remediation_description\")" ]
[ "0.6749599", "0.6595771", "0.65909255", "0.6580674", "0.6552211", "0.65474534", "0.65056944", "0.6464271", "0.6464271", "0.6463483", "0.6435495", "0.6418199", "0.6409962", "0.63947475", "0.6379907", "0.6308825", "0.62577033", "0.62099546", "0.6208194", "0.6198969", "0.6187488", "0.6184926", "0.618147", "0.61812186", "0.61770326", "0.6172973", "0.61674535", "0.61538833", "0.61483145", "0.6138152", "0.6136974", "0.6124741", "0.6077206", "0.60536134", "0.6051652", "0.6046171", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6040289", "0.6033602", "0.6027729", "0.6027059", "0.60195076", "0.601177", "0.6010603", "0.60102034", "0.6003782", "0.59921163", "0.59872806", "0.5982029", "0.59814686", "0.59810156", "0.5975336", "0.59735954", "0.59676194", "0.5963877", "0.59588295", "0.59453344", "0.5934605", "0.5934605", "0.59343535", "0.5934192", "0.59265184", "0.59265184" ]
0.0
-1
Pull an Alternative from encoded string, return remainder
def decode(cls, encstr: str) -> Tuple['Alternative', str]: cond = None end_off = 0 # Swallow field up to conditiona while end_off < len(encstr): if encstr[end_off] in string.punctuation: cond = encstr[end_off] break end_off += 1 if cond is None: raise ValueError('{} does not contain any operator' .format(encstr)) field = encstr[:end_off] end_off += 1 value = '' while end_off < len(encstr): if encstr[end_off] == '|': # We swallow this end_off += 1 break if encstr[end_off] == '&': break if encstr[end_off] == '\\': end_off += 1 value += encstr[end_off] end_off += 1 return cls(field, cond, value), encstr[end_off:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(cls, encstr: str) -> Tuple['Restriction', str]:\n alts = []\n while len(encstr) != 0:\n if encstr.startswith('&'):\n encstr = encstr[1:]\n break\n alt, encstr = Alternative.decode(encstr)\n alts.append(alt)\n return cls(alts), encstr", "def decode(self, s):", "def decode(self, s):", "def trydecode(inp, altchars = None):\r\n\tfor x in range(len(inp), 0, -1):\r\n\t\tfor y in PADDING:\r\n\t\t\ttry:\r\n\t\t\t\tret = b64decode(inp[:x] + y, altchars = None)\r\n\t\t\t\treturn ret\r\n\t\t\texcept TypeError:\r\n\t\t\t\tpass\r\n\traise Exception('Could not decode data')", "def decode(self, byteString):\n decoded = ''\n portion_left = byteString\n while len(portion_left) > 0:\n substr_len = 1\n symbol = None\n while (symbol == None) and (substr_len <= len(portion_left)):\n symbol = self.decode_symbol(portion_left[:substr_len])\n substr_len += 1\n\n if symbol == None:\n print \"decode failed:\"\n print \"decoded: \" + decoded\n print \"left: \" + portion_left\n return None\n\n decoded += symbol\n #print \"decoded: _\" + symbol + \"_\"\n portion_left = portion_left[substr_len-1:]\n\n return decoded", "def decode_string(self, value):\r\n return value", "def decode(self, encoded):", "def decode_network_string(msgtype, plen, buf):\n return buf[header.size:plen - 1]", "def _DecodeAccidentalString(cls, sAccidental):\n sAcc = sAccidental.strip()\n # Strip non-accidental content\n index = 0\n for n in range(len(sAcc)):\n if (sAcc[n] not in cls.lilyFlat) and (sAcc[n] not in cls.lilySharp):\n index = n\n break\n sAcc = sAcc[:index]\n encFlat = cls.encodingAccidentals.get(cls.lilyFlat, None)\n encSharp = cls.encodingAccidentals.get(cls.lilySharp, None)\n if cls._reFlat.match(sAcc):\n return encFlat*len(sAcc)//2\n elif cls._reSharp.match(sAcc):\n return encSharp*len(sAcc)//2\n else:\n return 0", "def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])", "def decode_extra_field(self, string):\n\n if isinstance(string, str):\n try:\n decode = int(string)\n except ValueError:\n return string\n return decode\n else:\n return string", "def test_decodeWithoutFinalASCIIShift(self):\n self.assertEqual(\n b'&AL0'.decode('imap4-utf-7'),\n u\"\\N{VULGAR FRACTION ONE HALF}\",\n )", "def test_decode(self):\n assert url_encoder.decode('TheStakeOut') == 1\n assert url_encoder.decode('TheStockTip-TheSeven') == 800\n assert url_encoder.decode('MaleUnbonding-TheConversion-TheAndreaDoria') == 99999", "def decode_encoded_string_value(byte_iter):\n try:\n # First try \"Value-length Char-set Text-string\"\n value_length = wsp_pdu.Decoder.decode_value_length(byte_iter)\n # TODO: add proper support for charsets...\n try:\n charset = wsp_pdu.Decoder.decode_well_known_charset(byte_iter)\n except wsp_pdu.DecodeError, msg:\n raise Exception('encoded_string_value decoding error - '\n 'Could not decode Charset value: %s' % msg)\n\n return wsp_pdu.Decoder.decode_text_string(byte_iter)\n except wsp_pdu.DecodeError:\n # Fall back on just \"Text-string\"\n return wsp_pdu.Decoder.decode_text_string(byte_iter)", "def safe_decode_inner(s):\n if isinstance(s, unicode):\n return s\n for encoding in preflist:\n try:\n return s.decode(encoding, 'strict')\n except UnicodeDecodeError:\n if logger is not None:\n logger.warn(\"Assuming %(encoding)r, can't decode %(s)r\",\n locals())\n if errors != 'strict' and preferred:\n return s.decode(preferred, errors)\n raise", "def _get_string_from_packing(self, string_to_unpack):\n return string_to_unpack[4:]", "def 
decode_result(found):\n ...", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n if end != len(s):\n raise ValueError(errmsg(\"Extra data\", s, end, len(s)))\n return obj", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n if end != len(s):\n raise ValueError(errmsg(\"Extra data\", s, end, len(s)))\n return obj", "def decode(a):\n return decode(a)", "def __decodeString(self,ascii):\n second = ascii%256\n first = (ascii-second)/256\n return str(chr(first))+str(chr(second))", "def decode(str):\n s6 = re.sub('6','\\n',str)\n s5 = re.sub('5','44',s6)\n s4 = re.sub('4','33',s5)\n s3 = re.sub('3','22',s4)\n return re.sub('2',' ',s3)", "def test_decode():\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert \"\\n\" in x", "def decode(self, shortUrl: str) -> str:\n short = shortUrl.split('/')[-1]\n if short in short2long:\n return short2long[short]\n else:\n return None", "def unpack_string(self, offset, length):\n return struct.unpack_from(str(\"<%ds\") % (length), self._buf, self._offset + offset)[0]", "def decode(self):\n if self.ciphered:\n msg = self.result \n self.result = ''\n else:\n msg = self.msg\n try:\n self.result = self.doDecode(msg,self.shift)\n except Exception as e:\n raise CipherError(\"decoding failure {}.\".format(e))\n self.ciphered = False\n return self.result", "def decode(self, shortUrl: str) -> str:\n return self.lookup[shortUrl]", "def decode(self, shortUrl):\n shortUrl = shortUrl[-6:]\n if shortUrl in self.short_to_long:\n return self.short_to_long[shortUrl]", "def decode_email(email):\n return", "def decode(data): #@NoSelf", "def _DecodeAccidentalString(cls, sAccidental):\n sAcc = sAccidental.strip()\n encFlat = cls.encodingAccidentals.get(cls.flatChar, None)\n encSharp = cls.encodingAccidentals.get(cls.sharpChar, None)\n encNatural = cls.encodingAccidentals.get(cls.naturalChar, None)\n if sAcc == cls.naturalChar:\n return encNatural\n if cls._reFlat.match(sAcc):\n return encFlat*len(sAcc)\n if cls._reSharp.match(sAcc):\n return encSharp*len(sAcc)", "def decode(s):\n start = 0\n multiplier = 1\n for char in s[::-1]:\n start += multiplier * LETTERS.index(char)\n multiplier = multiplier * 58\n return start", "def from_str(cls, encstr: str) -> 'Restriction':\n encstr = re.sub(r'\\s+', '', encstr)\n ret, remainder = cls.decode(encstr)\n if len(remainder) != 0:\n raise ValueError(\"Restriction had extrs characters at end: {}\"\n .format(remainder))\n return ret", "def from_str(cls, encstr: str) -> 'Alternative':\n encstr = re.sub(r'\\s+', '', encstr)\n return cls(*re.split('([' + string.punctuation + '])', encstr, maxsplit=1))", "def decode_str(decrypted_text: bytes, encoding: str) -> Tuple[str, str]:\n msg = ''\n out = ''\n if not encoding:\n with warnings.catch_warnings(record=True) as e:\n charset_match = from_bytes(decrypted_text)\n if len(charset_match):\n out = str(charset_match[0])\n demisto.debug(f\"Decode decrypted text using {charset_match[0].encoding} encoding\")\n if e:\n msg = f'Note: encoding detection ended with warning: {e[0].message} Characters may be missing.' 
\\\n ' You can try running this command again and pass the encoding code as argument.\\n'\n else:\n out = decrypted_text.decode(encoding)\n\n return out, msg", "def imaputf7decode(s):\n lst = s.split('&')\n out = lst[0]\n for e in lst[1:]:\n u, a = e.split('-', 1) #u: utf16 between & and 1st -, a: ASCII chars folowing it\n if u == '':\n out += '&'\n else:\n out += b64padanddecode(u)\n out += a\n return out", "def decode_secret(secret, encoding=SecretEncoding.BASE32):\n return _decoding_map[encoding](secret)", "def random_alternative(self, fmt_string):\n # Find alternatives\n try:\n alts = self[fmt_string]\n except KeyError:\n # There are no alternatives for this string\n return fmt_string\n return random.choice(alts)", "def decode_guess(self, label, buf, pos):\n try:\n print(str(pos) + \" Guess1: trying len delim\")\n return self.decode_lendelim_message(label, buf, {}, pos), 'message'\n except Exception:\n print(str(pos) + \" Guess2: trying bytes\")\n return self.decode_bytes(buf, pos), 'bytes'", "def decode_letter(self, received_letter):\n syndrome = (np.matmul(received_letter, self.parity_check)) % 2\n\n try:\n error_vector = self.syndrome_table[tuple(syndrome)]\n except KeyError:\n error_vector = random.choice(list(self.syndrome_table.values()))\n letter_vector = (error_vector + received_letter) % 2\n\n try:\n return self.code_words[tuple(letter_vector)]\n except KeyError:\n\n return random.choice(list(self.code_words.values()))", "def DEIMdecode(byte):\n\n result = ''\n if byte & 0x80:\n if byte & 0x40: result += 'B'\n else: result += 'D'\n if byte & 0x20: result += '-'\n result += '%d' % ((byte >> 3) & 0x03)\n if byte & 0x04: result += '-'\n result += '%d' % (byte & 0x03)\n else:\n if byte == 0111: result += 'N'\n elif byte == 0151: result += 'R'\n elif byte == 0171: result += 'F'\n elif byte == 0200: result += 'P'\n else: result += 'A%3.3o' % byte\n return result", "def get_encoded_msg():\n print(\"Enter text you would like to decode:\\n\")\n e_msg = input(\">\")\n return e_msg", "def decode(self, shortUrl):\n longUrl = self.url_dict.get(shortUrl[19:],None)\n if longUrl != None:\n return longUrl\n else:\n return None", "def decode(text, password):\r\n\tstep_index = 0\r\n\tdecoded_text = ''\r\n\tfor letter in text:\r\n\t\tdecoded_text += prev_letter(letter, to_int(password[step_index]))\r\n\t\tstep_index += 1\r\n\t\tif step_index > len(password)-1:\r\n\t\t\tstep_index = 0\r\n\treturn decoded_text", "def decode(self, s):\n o = self._decoder.decode(s)\n return o", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n\n return obj", "def pre_dissect(self, s):\n if len(s) < 1:\n raise Exception(\"Invalid InnerPlaintext (too short).\")\n\n tmp_len = len(s) - 1\n if s[-1] != b\"\\x00\":\n msg_len = tmp_len\n else:\n n = 1\n while s[-n] != b\"\\x00\" and n < tmp_len:\n n += 1\n msg_len = tmp_len - n\n self.fields_desc[0].length_from = lambda pkt: msg_len\n\n self.type = struct.unpack(\"B\", s[msg_len:msg_len + 1])[0]\n\n return s", "def decode_match(s):\n if '=' in s:\n raise TypeError\n while True:\n try:\n bin = standard_b64decode(s)\n except TypeError, e:\n if str(e) != 'Incorrect padding':\n raise\n s += '='\n else:\n break\n return MatchProxy(bin)", "def get_string(opt=\"encode\"):\n text = input(f\"Enter string to {opt}: \")\n return text", "def decode(s, storage=BIT_STORAGE, alpha=ALPHABET):\n n = [ord(a) for a in s if a != TWUUENC_START and a != TWUUENC_START_ZLIB]\n bs = BitString()\n for a in n:\n for pos,l in 
enumerate(alpha):\n if a == l:\n bs.append(BitString(uint=pos, length=storage))\n bs.seekbyte(0)\n return bs.readbytes(len(bs)/8).data.rstrip('\\0')", "def escapeDecode(s: unicode) -> unicode:\n ...", "def dissect(self, text):", "def unpack_string(s):\n\t# Totally empty string\n\tif len(s) == 0:\n\t\treturn \"\", s\n\t\n\t# Remove the length\n\ttry:\n\t\t(l, ), s = unpack(\"I\", s)\n\texcept TypeError, e:\n\t\traise TypeError(\"Problem unpacking length of string: %s\" % e)\n\n\tif l > 0:\n\t\t# Get the string, (we don't need the null terminator so nuke it)\n\t\tif len(s) < l:\n\t\t\traise TypeError(\"Not enough data for string, length said %s bytes got %r (%s bytes)\" % (l, s, len(s)))\n\n\t\toutput = s[:l]\n\t\ts = s[l:]\n\t\t\n\t\t# Remove any extra null terminators.\n\t\tif output[-1] == '\\0':\n\t\t\toutput = output[:-1]\n\n\t\t# Make sure the string is a valid utf-8 string\n\t\t# If the sender is well behaved this does nothing...\n\t\toutput = encodings.utf_8.decode(output, errors='ignore')[0]\n\n\t\treturn output, s\n\telse:\n\t\treturn \"\", s", "def unpack(self, s):\n\n raise NotImplementedError()", "def doDecode(self):\n raise CipherError(\"override this funct and return the decoded msg\")", "def get_alt(self):\n p = self._get_sub_text('alt')\n if not p:\n return None\n else:\n try:\n return int(p)\n except ValueError:\n return None", "def b64decode(data, altchars=b'+/'):\n data = re.sub(r'[^a-zA-Z0-9%s]+' % altchars, '', data) # normalize\n missing_padding = len(data) % 4\n if missing_padding:\n data += '='* (4 - missing_padding)\n return base64.b64decode(data, altchars)", "def decode_morse(ciphertext):\r\n if not isinstance(ciphertext, str):\r\n return \"Ciphertext is not a string!\"\r\n ciphertext_copy = str(ciphertext)\r\n if len(ciphertext) == 0: #Accounts for empty string\r\n return \"\"\r\n if ciphertext_copy[-1] != \" \":\r\n ciphertext_copy += \" \" #Accounts for user variation in final trailing whitespace - we need this final whitespace for the dictionary to work\r\n #This also has the effect of returning nonsense characters we can't decode later on\r\n plaintext = \"\" #Empty string solution\r\n morse_char = \"\" #This variable will hold each letter/character's Morse code\r\n for character in ciphertext_copy:\r\n if character == \" \": #Spaces are letter delimiters\r\n morse_char += character\r\n if morse_char in ciphertext_characters:\r\n plaintext += plaintext_characters[ciphertext_characters.index(morse_char)]\r\n morse_char = \"\" #Reset the holding variable\r\n else:\r\n return \"ERROR: I can't decode the following character: \" + morse_char + \"\\nYour decoded message thus far is: \" + whitespace_sorter(plaintext)\r\n #The nature of this return statement allows tests via assertion, but will also respond to print statements accordingly.\r\n else:\r\n morse_char += character #If it's not a letter delimiter, continue building the letter/character Morse code\r\n plaintext = whitespace_sorter(plaintext)\r\n return plaintext", "def test_basic():\n assert song_decoder(\"AWUBBWUBC\") == \"A B C\"\n assert song_decoder(\"AWUBWUBWUBBWUBWUBWUBC\") == \"A B C\"\n assert song_decoder(\"WUBAWUBBWUBCWUB\") == \"A B C\"\n assert song_decoder(\"RWUBWUBWUBLWUB\") == \"R L\"\n assert song_decoder(\"WUBJKDWUBWUBWBIRAQKFWUBWUBYEWUBWUBWUBWVWUBWUB\") == \"JKD WBIRAQKF YE WV\"\n assert song_decoder(\"WUBKSDHEMIXUJWUBWUBRWUBWUBWUBSWUBWUBWUBHWUBWUBWUB\") == \"KSDHEMIXUJ R S H\"\n assert song_decoder(\"QWUBQQWUBWUBWUBIWUBWUBWWWUBWUBWUBJOPJPBRH\") == \"Q QQ I WW JOPJPBRH\"\n assert 
song_decoder(\"WUBWUBOWUBWUBWUBIPVCQAFWYWUBWUBWUBQWUBWUBWUBXHDKCPYKCTWWYWUBWUBWUBVWUBWUBWUBFZWUBWUB\") == \"O IPVCQAFWY Q XHDKCPYKCTWWY V FZ\"\n assert song_decoder(\"WUBYYRTSMNWUWUBWUBWUBCWUBWUBWUBCWUBWUBWUBFSYUINDWOBVWUBWUBWUBFWUBWUBWUBAUWUBWUBWUBVWUBWUBWUBJB\") == \"YYRTSMNWU C C FSYUINDWOBV F AU V JB\"\n assert song_decoder(\"WUBKSDHEMIXUJWUBWUBRWUBWUBWUBSWUBWUBWUBHWUBWUBWUB\") == \"KSDHEMIXUJ R S H\"\n assert song_decoder(\"AWUBWUBWUB\") == \"A\"\n assert song_decoder(\"AWUBBWUBCWUBD\") == \"A B C D\"\n assert song_decoder(\"WUBWWUBWUBWUBUWUBWUBBWUB\") == \"W U B\"\n assert song_decoder(\"WUWUBBWWUBUB\") == \"WU BW UB\"\n assert song_decoder(\"WUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUABWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUB\") == \"WUAB\"\n assert song_decoder(\"U\") == \"U\"\n assert song_decoder(\"WUWUB\") == \"WU\"\n assert song_decoder(\"UBWUB\") == \"UB\"\n assert song_decoder(\"WUWUBUBWUBUWUB\") == \"WU UB U\"\n assert song_decoder(\"WUBWWUBAWUB\") == \"W A\"\n assert song_decoder(\"WUUUUU\") == \"WUUUUU\"\n assert song_decoder(\"WUBWUBA\") == \"A\"", "def urlDecode(s):\n\tmychr = chr\n\tatoi = string.atoi\n\tparts = string.split(string.replace(s, '+', ' '), '%')\n\tfor i in range(1, len(parts)):\n\t\tpart = parts[i]\n\t\tparts[i] = mychr(atoi(part[:2], 16)) + part[2:]\n\treturn string.join(parts, '')", "def translate_leet(phrase):", "def get_decoded_value(self, encoded_s):\n stack = Stack()\n node = self.get_root().get_value()[1]\n state = State(node)\n stack.push(state)\n count = 0\n decoded_s = \"\"\n \n while node and count < len(encoded_s):\n if encoded_s[count] == '0':\n if type(node) == str:\n decoded_s += node\n node = self.get_root().get_value()[1]\n if node.has_left_child(): \n node = node.get_left_child()[1]\n # Handle case for the last character found from the encoded string\n if count == len(encoded_s) - 1:\n decoded_s += node\n count += 1\n else: # Go to right node\n if type(node) == str:\n decoded_s += node\n node = self.get_root().get_value()[1]\n if node.has_right_child(): \n node = node.get_right_child()[1]\n # Handle case for the last character found from the encoded string\n if count == len(encoded_s) - 1:\n decoded_s += node\n count += 1\n\n return decoded_s", "def decode(string: str) -> str:\n a = re.sub(r\"/\\+/g\", ' ', unquote(string))\n return a", "def extract_string(line, idx, result):\n\n begin = line.find(resource_string_prefix, idx)\n if begin == -1:\n return -1\n \n begin = begin + len(resource_string_prefix)\n end = -1\n for i in range(begin, len(line)):\n if not is_valid_char(line[i]):\n end = i\n break\n\n result.add(line[begin:end])\n return end", "def decode(s):\n decoded = 0\n multi = 1\n s = s[::-1]\n for char in s:\n decoded += multi * alphabet.index(char)\n multi = multi * base_count\n \n return decoded", "def _decode_str(self, buf):\n length = self._decode_vint(buf)\n result = buf.read(length)\n if len(result) != length:\n raise EndOfMessage(True)\n return result", "def kwextract(s):\n try:\n return strip(s, \"$\").strip().split(\": \")[1]\n except IndexError:\n return \"<unknown>\"", "def decode(self, shortUrl):\n pass", "def decode(self, encoded: str):\n if not isinstance(encoded, str) or not encoded:\n return None\n int_encoded = self._decode_str(encoded)\n if int_encoded is None:\n return None\n int_origin = self._int_obfuscator.decode(int_encoded)\n if int_origin is None:\n return None\n str_encoded = self.__encode(int_origin)\n return 
int_origin if str_encoded == encoded else None", "def decode_text():\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} Enter message to decode: \", end=\"\")\n message = input()\n extract_encoded_message = message.split(LEFT_TO_RIGHT_MARK)[1]\n message = extract_encoded_message\n extract_encoded_message = message.split(RIGHT_TO_LEFT_MARK)[0]\n encoded = ''\n decoded = ''\n\n for message_char in message:\n if message_char in zero_space_symbols:\n encoded = encoded + str(zero_space_symbols.index(message_char))\n\n cur_encoded_char = ''\n\n for index, encoded_char in enumerate(encoded):\n cur_encoded_char = cur_encoded_char + encoded_char\n if index > 0 and (index + 1) % padding == 0:\n decoded = decoded + chr(int(cur_encoded_char, len(zero_space_symbols)))\n cur_encoded_char = ''\n\n return decoded", "def decode_from_value(byte_iter):\n value_length = wsp_pdu.Decoder.decode_value_length(byte_iter)\n # See what token we have\n byte = byte_iter.next()\n if byte == 129: # Insert-address-token\n return '<not inserted>'\n\n return MMSDecoder.decode_encoded_string_value(byte_iter)", "def polite_string(a_string):\n if is_py3() and hasattr(a_string, 'decode'):\n try:\n return a_string.decode('utf-8')\n except UnicodeDecodeError:\n return a_string\n\n return a_string", "def decode(self, shortUrl):\n v = shortUrl[20:len(shortUrl)]\n return (self.hash[int(v)])", "def decode_base64(data, altchars=b'+/'):\n data = re.sub(rb'[^a-zA-Z0-9%s]+' % altchars, b'', data) # normalize\n missing_padding = len(data) % 4\n if missing_padding:\n data += b'='* (4 - missing_padding)\n return base64.b64decode(data, altchars)", "def parse(s):\n return s", "def unicodise(string, encoding = None, errors = \"replace\"):\n\n\tif not encoding:\n\t\tencoding = Config.Config().encoding\n\n\tif type(string) == unicode:\n\t\treturn string\n\tdebug(\"Unicodising %r using %s\" % (string, encoding))\n\ttry:\n\t\treturn string.decode(encoding, errors)\n\texcept UnicodeDecodeError:\n\t\traise UnicodeDecodeError(\"Conversion to unicode failed: %r\" % string)", "def decode(src):\n if not src:\n return None\n\n return scramble(src)", "def test_decode():\n enig = Enigma(534, 16, 8, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])\n string = \"\"\"-)m>&)IKp[1`Sro$82[@_`TV&`f%}|<]a1R*\\W4IEb6j@+':`R[.(1$vV4rTJ2\n6V?5.;8q r%0p@+[Ir7-?rzIl;nV<4W7,PD[5-?;RE+~vR5-`i}>=z@S \"eJ`8g:S:1ir\nE0=<F0~/;6).\"\"\"\n decoded = \"\"\"Hello, this is a test string. I will follow this with a return\nbringing it onto a new line. I can do this forever, but I won't. 
Just\nfor a while.\"\"\"\n\n enig.setrotsettings([5, 2, 2, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5])\n assert_equal(decoded, enig.decode(string))\n\n startsettings = [4, 6, 0, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5]\n assert_equal(startsettings, enig.getrotsettings())", "def decode(self, shortUrl: str) -> str:\n url = shortUrl.split('/')[-1]\n idx = int(url)\n \n return self.reverse_map[idx]", "def decode(self, shortUrl):\n return self.decode_map[shortUrl]", "def decode(self, shortUrl):\n return self.decode_map[shortUrl]", "def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3):\r\n if _PY3 and not isinstance(s, text_type):\r\n raise TypeError(\"Input string must be text, not bytes\")\r\n try:\r\n obj, end = self.scan_once(s, idx=_w(s, idx).end())\r\n except StopIteration:\r\n raise JSONDecodeError(\"No JSON object could be decoded\", s, idx)\r\n return obj, end", "def decode(self, value):\r\n return value", "def raw_decode(self, s, idx=0):\n try:\n obj, end = self.scan_once(s, idx)\n except StopIteration as err:\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\n return obj, end", "def decode1(s):\n rv = []\n idx = 0\n item = ''\n while True:\n try:\n if s[idx:idx+2] == '+,':\n rv.append(item)\n item = ''\n idx += 2\n elif s[idx:idx+2] == '++':\n item += '+'\n idx += 2\n else:\n item += s[idx]\n idx += 1\n except IndexError:\n rv.append(item)\n break\n return rv", "def get_alt(self, dec, i_alt):\n return self.decisions[dec].value[i_alt]", "def decode_when_needed(result):\n return result.decode('utf-8') if isinstance(result, bytes) else result", "def base64_decode(n, encoding='ISO-8859-1'):\t\n decoded = base64.decodestring(n.encode('ascii'))\t\n return tonative(decoded, encoding)", "def _decode(data: BencodedString) -> Union[bytes, dict, int, list]:\n if not data.bytes:\n raise ValueError(\"Cannot decode an empty bencoded string.\")\n\n if data.bytes[0] == START_DICT:\n return _decode_dict(data)\n\n if data.bytes[0] == START_LIST:\n return _decode_list(data)\n\n if data.bytes[0] == START_INTEGER:\n return _decode_int(data)\n\n if chr(data.bytes[0]).isdigit():\n return _decode_bytes(data)\n\n raise ValueError(\n \"Cannot decode data, expected the first byte to be one of \"\n f\"'d', 'i', 'l' or a digit, got {chr(data.bytes[0])!r} instead.\"\n )", "def decode(self, shortUrl):\n return self.demap[shortUrl]", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def decode_position(s):\n if '=' in s:\n raise TypeError\n while True:\n try:\n bin = standard_b64decode(s)\n except TypeError, e:\n if str(e) != 'Incorrect padding':\n raise\n s += '='\n else:\n break\n r = twoside_decode(bin)\n if not len(r) == 2 or not len(r[0])==25 or not len(r[1])==25:\n raise bglib.encoding.DecodeError('got bad data: %s '%(s,))\n return r", "def partialMorseCodeTest():\r\n\r\n\t# This is a partial representation of the word TEST, amongst other possible combinations\r\n\ttest = ['x','x','x..','x']\r\n\tprint(morsePartialDecode(test))\r\n\r\n\t# This is a partial representation of the word DANCE, amongst other possible combinations\r\n\tdance = ['x..','x-','x.','x.-.','x']\r\n\tprint(morsePartialDecode(dance))", "def decode_data ( data ) :\n cipher = get_cipher( data )\n index = 0\n firstpass = []\n datalen = len( data )\n while index < datalen :\n if index % 2 == 0 :\n firstpass.append( chr( ord( data[ index ] ) - cipher ) )\n else :\n firstpass.append( chr( ord( data[ index ] ) 
+ cipher ) )\n index += 1\n\n firstpass[ 0 ] = data[ 0 ]\n firstpass[ -1 ] = data[ -1 ]\n firstpass[ -2 ] = data[ -2 ]\n decoded_data = ''.join( firstpass )\n return base64.b64decode( decoded_data )", "def decodepkt(self, pkt):\n res = \"\"\n if pkt.startswith('$'):\n try:\n self.logger.debug('unpack< %s', pkt) \n res = self.unpack(pkt)\n except ValueError as ex:\n self.logger.debug('GDB-< %s', res)\n self.logger.warning('Bad packet %s', ex) \n self.s.send(b'-')\n else:\n self.s.send(b'+')\n self.logger.debug('GDB+< %s', res) \n return res\n else:\n self.logger.warning('discards %s', pkt)", "def read_string(self):\n\n # length may be -1, 0, or a positive integer\n length = self.read_and_unpack('l')[0]\n if length > 0:\n return self.read(length).decode(self.utf_16_decoder)\n else:\n return ''", "def decode_message(self, key):\n\n decoded_message = ''\n for char in self.message:\n if char.isalpha():\n decoded_char = self.convert_char(char, key)\n decoded_message = decoded_message + decoded_char\n else:\n decoded_message = decoded_message + char\n return decoded_message", "def read_str(self, p, offset, default_, additional_size):\n if p == 0:\n return default_\n assert ptr.kind(p) == ptr.LIST\n assert ptr.list_size_tag(p) == ptr.LIST_SIZE_8\n start = ptr.deref(p, offset)\n end = start + ptr.list_item_count(p) + additional_size\n return self.buf[start:end]", "def decode(self, eth):\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_ARP:\n\t\t\t# print 'arp'\n\t\t\treturn ARP(eth.data).get()\n\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP6:\n\t\t\tip = eth.data\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# multicast is just like IPv4\n\t\t\t\tif udp.dport == 5353:\n\t\t\t\t\t# print udp\n\t\t\t\t\tans = mDNS(udp).get()\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\t# pp.pprint(ans)\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\treturn ans\n\n\t\t\t\t# print 'IPv6 UDP','port:',udp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# TCP not useful\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\tpass\n\t\t\t\t# tcp = ip.data\n\t\t\t\t# print 'IPv6 TCP','port:',tcp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# ICMP error msg not useful for mapping\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t\t# print 'IPv6 icmp6:',ip.data.data\n\t\t\t\tpass\n\n\t\t\t# other stuff I haven't decoded\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t# print 'IPv6',ip.p,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t\tip = eth.data\n\n\t\t\t# roku interface port: 1900 dst: 239.255.255.250 1900\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# these aren't useful\n\t\t\t\tif udp.dport == 53: # DNS\n\t\t\t\t\t# return DNS(udp.data)\n\t\t\t\t\treturn {}\n\n\t\t\t\telif udp.dport == 5353: # mDNS\n\t\t\t\t\t# print 'mDNS'\n\t\t\t\t\t# print udp\n\t\t\t\t\treturn mDNS(udp).get()\n\n\t\t\t\telif self.getip(ip.dst) == '239.255.255.250':\n\t\t\t\t\treturn {}\n\n\t\t\t\telse:\n\t\t\t\t\t# don't print standard ports\n\t\t\t\t\t# 17500 dropbox\n\t\t\t\t\t# if not ip.data.dport in [17500]:\n\t\t\t\t\t# \tprint 'other udp','port:',udp.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst),': '\n\t\t\t\t\treturn {}\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\t# src = self.getip(ip.src)\n\t\t\t\t# if netaddr.IPAddress(src) not in netaddr.IPNetwork(\"192.168.1.0/24\"):\n\t\t\t\t# \twho = ''\n\t\t\t\t# \tif src not in self.ipMap:\n\t\t\t\t# \t\twho = 
WhoIs(src).record['NetName']\n\t\t\t\t# \t\tself.ipMap[src] = who\n\t\t\t\t# \telse:\n\t\t\t\t# \t\twho = self.ipMap[src]\n\t\t\t\t# \tif who in ['GOOGLE','AKAMAI','APPLE-WWNET','AMAZO-ZIAD1','DROPBOX']:\n\t\t\t\t# \t\treturn {}\n\t\t\t\t# \telse:\n\t\t\t\t# \t\tprint src,who\n\t\t\t\t# don't print standard ports\n\t\t\t\t# port 58969 - XSANS Apple, why do i see that?\n\t\t\t\t# 22 ssh\n\t\t\t\t# 25 smtp\n\t\t\t\t# 80 http\n\t\t\t\t# 123 time server\n\t\t\t\t# 143 imap\n\t\t\t\t# 443 https\n\t\t\t\t# 445 smb\n\t\t\t\t# 548 afp over tcp\n\t\t\t\t# 5009 airport admin utility\n\t\t\t\t# 5222 ichat\n\t\t\t\t# 17500 dropbox\n\t\t\t\t# if not ip.data.dport in [22,25,80,123,143,443,445,548,5009,5222,17500]:\n\t\t\t\t\t# print 'other tcp','port:',ip.data.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}\n\t\t\t# elif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t# \tprint '?????? other icmp6','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telif ip.p == 2:\n\t\t\t\tpass\n\t\t\t\t# print 'IGMP','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telse:\n\t\t\t\t# print 'other ip packet','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}" ]
[ "0.66574126", "0.6132067", "0.6132067", "0.5771838", "0.5631769", "0.5482572", "0.5471471", "0.54456496", "0.54365635", "0.537955", "0.5371491", "0.53414893", "0.5331267", "0.52930796", "0.52915156", "0.52808076", "0.527762", "0.5274057", "0.5274057", "0.5270972", "0.5267768", "0.5244202", "0.52166", "0.5177876", "0.5152874", "0.5149147", "0.51428133", "0.51424", "0.5142063", "0.51395404", "0.51394427", "0.51151305", "0.50957245", "0.50950444", "0.5088075", "0.5086042", "0.5064707", "0.50586885", "0.50490713", "0.5043681", "0.5038145", "0.50366396", "0.502936", "0.50282675", "0.5025795", "0.50024635", "0.49856254", "0.4984618", "0.49834087", "0.4966717", "0.49644464", "0.49390703", "0.49389833", "0.4935601", "0.49342826", "0.4922943", "0.4919144", "0.49139884", "0.490897", "0.49029756", "0.48996067", "0.48990592", "0.48988944", "0.4890687", "0.48785597", "0.4877922", "0.48737285", "0.48725858", "0.48672387", "0.48660755", "0.48631394", "0.48624215", "0.4860207", "0.4857737", "0.48501283", "0.48460463", "0.48365718", "0.48355004", "0.48319983", "0.48214388", "0.48214388", "0.4814059", "0.48051104", "0.48013467", "0.4800382", "0.4799625", "0.47993118", "0.478801", "0.47871843", "0.47862026", "0.4786202", "0.4786202", "0.47858045", "0.47616783", "0.47583127", "0.47513613", "0.47482145", "0.47477546", "0.47445577", "0.47438127" ]
0.59058076
3
Turns this userreadable string into an Alternative (no escaping)
def from_str(cls, encstr: str) -> 'Alternative': encstr = re.sub(r'\s+', '', encstr) return cls(*re.split('([' + string.punctuation + '])', encstr, maxsplit=1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def decode(cls, encstr: str) -> Tuple['Alternative', str]:\n cond = None\n end_off = 0\n\n # Swallow field up to conditiona\n while end_off < len(encstr):\n if encstr[end_off] in string.punctuation:\n cond = encstr[end_off]\n break\n end_off += 1\n if cond is None:\n raise ValueError('{} does not contain any operator'\n .format(encstr))\n field = encstr[:end_off]\n end_off += 1\n\n value = ''\n while end_off < len(encstr):\n if encstr[end_off] == '|':\n # We swallow this\n end_off += 1\n break\n if encstr[end_off] == '&':\n break\n if encstr[end_off] == '\\\\':\n end_off += 1\n value += encstr[end_off]\n end_off += 1\n\n return cls(field, cond, value), encstr[end_off:]", "def decode(cls, encstr: str) -> Tuple['Restriction', str]:\n alts = []\n while len(encstr) != 0:\n if encstr.startswith('&'):\n encstr = encstr[1:]\n break\n alt, encstr = Alternative.decode(encstr)\n alts.append(alt)\n return cls(alts), encstr", "def _sanitize_string(self, string):\n # get the type of a unicode string\n unicode_type = type(Pyasciigraph._u('t'))\n input_type = type(string)\n if input_type is str:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = string\n elif input_type is unicode_type:\n info = string\n elif input_type is int or input_type is float:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = str(string)\n else:\n info = str(string)\n return info", "def _hidden_in_unicode(self, txt):", "def random_alternative(self, fmt_string):\n # Find alternatives\n try:\n alts = self[fmt_string]\n except KeyError:\n # There are no alternatives for this string\n return fmt_string\n return random.choice(alts)", "def unicodise_safe(string, encoding = None):\n\n\treturn unicodise(deunicodise(string, encoding), encoding).replace(u'\\ufffd', '?')", "def escapeDecode(s: unicode) -> unicode:\n ...", "def _avert_unallowable(raw_string, escape_double_special_characters=False):\n output = []\n for c in raw_string:\n if c in _caret_escapes:\n output.append(_caret_escapes[c])\n elif escape_double_special_characters and c == '\"':\n output.append('^\"')\n else:\n output.append(c)\n return ''.join(output)", "def autoconvert(s):\n try:\n return eval(s)\n except:\n return s", "def unstringify(cls, s: str, unescape_pipe: bool = True)->str:\n if s.startswith(cls.LANGUAGE_QUALIFIED_STRING_SIGIL):\n language: str\n s, language = s.rsplit(\"@\", 1)\n if unescape_pipe:\n s = s.replace('\\\\|', '|')\n return ast.literal_eval(s)", "def unescape(input):\n output=atpic.cleaner_escape.unescape(input)\n return output", "def polite_string(a_string):\n if is_py3() and hasattr(a_string, 'decode'):\n try:\n return a_string.decode('utf-8')\n except UnicodeDecodeError:\n return a_string\n\n return a_string", "def normalize_repr(item_repr):\n return DEFAULT_REPR_RE.sub('', item_repr)", "def rl_unescape_prompt(prompt: str) -> str:\n if rl_type == RlType.GNU:\n escape_start = \"\\x01\"\n escape_end = \"\\x02\"\n prompt = prompt.replace(escape_start, \"\").replace(escape_end, \"\")\n\n return prompt", "def parse(s):\n return s", "def _sanitize_string_for_python(self, s):\n s = repr(s)\n\n if s.startswith('u'):\n s = s[1:]\n\n return s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def __normalize_string(self, string):\n\n if 
self._dia & self._DIA_PRE93:\n string = string.replace(u\"Â\", u\"Î\")\n string = string.replace(u\"ROMÎNĂ\", u\"ROMÂNĂ\")\n elif self._dia & self._DIA_POST93:\n string = string.replace(u\"Î\", u\"Â\")\n string = string.replace(u\"Â \", u\"Î\")\n\n if self._dia & self._DIA_CEDILLA:\n string = string.replace(u\"Ș\", u\"Ş\")\n string = string.replace(u\"Ț\", u\"Ţ\")\n elif self._dia & self._DIA_COMMA:\n string = string.replace(u\"Ş\", u\"Ș\")\n string = string.replace(u\"Ţ\", u\"Ț\")\n\n if self._dia & self._DIA_NONE:\n string = string.replace(u\"Î\", u\"I\")\n string = string.replace(u\"Â\", u\"A\")\n string = string.replace(u\"Ă\", u\"A\")\n string = string.replace(u\"Ș\", u\"S\")\n string = string.replace(u\"Ț\", u\"T\")\n\n return string", "def normalize(self, text: str) -> str:", "def raw(string):\n string = string or \"\"\n return string.replace(\"{\", \"{{\").replace(\"|\", \"||\")", "def process_string(string: str) -> str:\n\n return string if string else Presenter.DEFAULT", "def optionxform(self, optionstr):\r\n return optionstr", "def optionxform(self, optionstr):\r\n return optionstr", "def convert_text(s):\n for d in config.repl: # loaded from config.py\n if \"flags\" in d:\n s = re.sub(d[\"ptrn\"], d[\"repl\"], s, flags=d[\"flags\"])\n else:\n s = re.sub(d[\"ptrn\"], d[\"repl\"], s)\n return s", "def haiku_string_parser():\n pass", "def fix_output(text: str) -> str:\n\n text = text.replace(\" n't\", \"n't\")\n return text", "def polishString(s): \n return re.sub(\"[/\\\\\\?\\|<>:\\\"\\*]\",\"_\",s).strip()", "def from_dual(self):\n return \"\"", "def beautify(self, string):\n\n\t\tif not string:\n\t\t\treturn string\n\n\t\t# string may differ because of escaped characters\n\t\tstring, phrases = self.parse(string)\n\n\t\tif not phrases:\n\t\t\treturn string\n\n\t\tif not self.positional and not self.always:\n\t\t\traise errors.ArgumentError(\"Found phrases, but no styles \"\n\t\t\t\t\t\t\t\t\t \"were supplied!\")\n\n\t\treturn self.stringify(string, phrases)", "def replace(self, lexeme_type: Type, buffer: str):\r\n\r\n self.type = lexeme_type\r\n self.value = buffer", "def unicodise(string, encoding = None, errors = \"replace\"):\n\n\tif not encoding:\n\t\tencoding = Config.Config().encoding\n\n\tif type(string) == unicode:\n\t\treturn string\n\tdebug(\"Unicodising %r using %s\" % (string, encoding))\n\ttry:\n\t\treturn string.decode(encoding, errors)\n\texcept UnicodeDecodeError:\n\t\traise UnicodeDecodeError(\"Conversion to unicode failed: %r\" % string)", "def from_string(cls, dlstr):\n raise NotImplementedError(\"Should be implemented by subclass\")", "def get_data_from_nonformat_text():\n pass", "def nemo(a_string):\n return NemoParser().parse(a_string)", "def prepare_input(self, extracted_str):\n\n # Remove withspace\n if self.options['remove_whitespace']:\n optimized_str = re.sub(' +', '', extracted_str)\n else:\n optimized_str = extracted_str\n \n # Remove accents\n if self.options['remove_accents']:\n optimized_str = unidecode(optimized_str)\n\n # specific replace\n for replace in self.options['replace']:\n assert len(replace) == 2, 'A replace should be a list of 2 items'\n optimized_str = optimized_str.replace(replace[0], replace[1])\n\n return optimized_str", "def clean_up(text_not_allowed, raw_string):\n trans_table = str.maketrans(dict.fromkeys(text_not_allowed))\n clean_result = raw_string.translate(trans_table)\n return clean_result", "def decode(self, s):", "def decode(self, s):", "def tr(self, string1, string2, source, option=''):\r\n \r\n def 
is_valid_type(source):\r\n return isinstance(source, str)\r\n \r\n def make_char_list(source):\r\n char_list = []\r\n back_slash = False\r\n hyphen = False\r\n for char in source:\r\n if char == '\\\\':\r\n if not back_slash:\r\n back_slash = True\r\n continue\r\n elif char == '-' and not back_slash:\r\n hyphen = True\r\n continue\r\n elif hyphen:\r\n start = char_list[-1] + 1\r\n char_list += range(start, ord(char))\r\n char_list.append(ord(char))\r\n back_slash = False\r\n hyphen = False\r\n return char_list\r\n \r\n def to_unichr(char_list):\r\n return map(chr, char_list)\r\n \r\n def squeeze(from_list, source):\r\n for char in from_list:\r\n squeeze_pattern = re.compile('%s{2,}' % char)\r\n source = squeeze_pattern.sub(char, source)\r\n return source\r\n \r\n def translate(from_list, to_list, source):\r\n translate_dict = dict(zip(from_list, to_list))\r\n return source.translate(translate_dict)\r\n \r\n if not is_valid_type(source):\r\n raise TypeError('source string must be unicode')\r\n \r\n if option == 's':\r\n from_list = make_char_list(string1)\r\n from_list = to_unichr(from_list)\r\n return squeeze(from_list, source)\r\n elif 'c' in option:\r\n from_list = make_char_list(string1)\r\n from_list = to_unichr(from_list)\r\n from_list = [ord(c) for c in set(source) - set(from_list)]\r\n if 'd' in option:\r\n to_list = [None for i in from_list]\r\n else:\r\n to_list = [string2[-1] for i in from_list]\r\n source = translate(from_list, to_list, source)\r\n if 's' in option:\r\n source = squeeze(to_list, source)\r\n return source\r\n elif 'd' in option:\r\n from_list = make_char_list(string1)\r\n to_list = [None for i in from_list]\r\n source = translate(from_list, to_list, source)\r\n if 's' in option:\r\n to_list = make_char_list(string2)\r\n to_list = to_unichr(to_list)\r\n source = squeeze(to_list, source)\r\n return source\r\n else:\r\n from_list = make_char_list(string1)\r\n to_list = make_char_list(string2)\r\n to_list = to_unichr(to_list)\r\n return translate(from_list, to_list, source)", "def prepare_nlu_text(example: Text, entities: List[Dict]):\n if not Utility.check_empty_string(example):\n if entities:\n for entity in entities:\n example = example.replace(\n entity[\"value\"],\n \"[\" + entity[\"value\"] + \"](\" + entity[\"entity\"] + \")\",\n )\n return example", "def sanitize_string(unclean_string: str) -> str:\n return unidecode(unclean_string)", "def unicodise(string, encoding = None, errors = \"replace\"):\n global preferred_encoding\n \n if not encoding:\n encoding = preferred_encoding\n\n if type(string) == unicode:\n return string\n try:\n return string.decode(encoding, errors)\n except UnicodeDecodeError:\n raise UnicodeDecodeError(\"Conversion to unicode failed: %r\" % string)", "def _(string, doNotTranslate=False):\n if doNotTranslate:\n return string\n if type(string) not in (str, unicode):\n return string\n return string_cache.get(string, string.replace(\"\\n\", \"\\n\\n\"))", "def egest(s):\n if isinstance(s, rawtype):\n return s\n elif isinstance(s, safetype): # On python 2 str is bytes.\n return str2raw(s)\n else:\n raise TypeError(\"Can't egest data of type %s\" % type(s))", "def deunicodise(string, encoding = None, errors = \"replace\"):\n\n\tif not encoding:\n\t\tencoding = Config.Config().encoding\n\n\tif type(string) != unicode:\n\t\treturn str(string)\n\tdebug(\"DeUnicodising %r using %s\" % (string, encoding))\n\ttry:\n\t\treturn string.encode(encoding, errors)\n\texcept UnicodeEncodeError:\n\t\traise UnicodeEncodeError(\"Conversion from unicode 
failed: %r\" % string)", "def processDescrString(self):\n\t\tself.descrString = self._getVal(4, 1)", "def xStr(s):\n return s or 'N/A'", "def normalize(string):\n string = string.replace('E', 'e')\n string = re.sub('(?<!e)-', ' -', string)\n string = re.sub('[ \\n\\r\\t,]+', ' ', string)\n string = re.sub(r'(\\.[0-9-]+)(?=\\.)', r'\\1 ', string)\n return string.strip()", "def ConvertUnknownStringToProperValue(value: str, vtype: FClass.FiMVariableTypes):\n if value == \"nothing\":\n no_value = {\n FClass.FiMVariableTypes.BOOL: False,\n FClass.FiMVariableTypes.NUMBER: 0,\n FClass.FiMVariableTypes.STRING: \"\\\"\\\"\",\n FClass.FiMVariableTypes.CHAR: \"'?'\"\n }\n if vtype in no_value: return no_value[ vtype ]\n \n if vtype == FClass.FiMVariableTypes.CHAR:\n if not value.startswith(\"'\") and not value.endswith(\"'\") and len(value[1:-1]) != 1:\n raise Exception(f\"Invalid char {value}\")\n return value\n if vtype == FClass.FiMVariableTypes.STRING:\n if not value.startswith('\"') and not value.endswith('\"'):\n raise Exception(f\"Invalid string {value}\")\n return value\n if vtype == FClass.FiMVariableTypes.NUMBER:\n try:\n return float(value)\n except:\n raise Exception(f\"Invalid number {value}\")\n if vtype == FClass.FiMVariableTypes.BOOL:\n try:\n return ConvertBooleanString( value )\n except:\n raise Exception(f\"Invalid boolean {value}\")", "def transcribe(dna_string):\n from utils import thymine, uracil\n return dna_string.replace(thymine, uracil)", "def from_string(cls, dlstr):\n\n NotImplementedError(\"Should be implemented by subclass\")", "def test_value_special_chars(self):\n raw = [\n 0x48,\n 0x65,\n 0x79,\n 0x21,\n 0x3F,\n 0x24,\n 0x20,\n 0xC4,\n 0xD6,\n 0xDC,\n 0xE4,\n 0xF6,\n 0xFC,\n 0xDF,\n ]\n string = \"Hey!?$ ÄÖÜäöüß\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def unescape(s):\n\n\tif s is None:\n\t\treturn \"\"\n\n\t# html entities\n\ts = s.replace(\"&#13;\", \"\\r\")\n\n\t# standard html\n\ts = s.replace(\"&lt;\", \"<\")\n\ts = s.replace(\"&gt;\", \">\")\n\ts = s.replace(\"&amp;\", \"&\") # this has to be last\n\n\treturn s", "def get_alt(self):\n p = self._get_sub_text('alt')\n if not p:\n return None\n else:\n try:\n return int(p)\n except ValueError:\n return None", "def convert(string, sanitize=False):\n return r.convert(string, (preprocess if sanitize else False))", "def escape_lg(s):\n return s.replace('>','\\>').replace('<','\\<')", "def native_(s, encoding='latin-1', errors='strict'):\n if isinstance(s, text_type):\n return s\n return str(s, encoding, errors)", "def annon(**kwargs):\n kwargs.update({\"__str__\": _annonStr})\n return type('',(),kwargs)()", "def clean_exception(v):\n v = re.sub(r\"\\[\\[[^]|]*\\|([^]]*)\\]\\]\", r\"\\1\", v)\n v = re.sub(r\"\\[\\[\", \"\", v)\n v = re.sub(r\"\\]\\]\", \"\", v)\n v = re.sub(r\"``+\", \"\", v)\n v = re.sub(r\"''+\", \"\", v)\n v = re.sub(r\"(?is)<sup>.*?</sup>\", \"\", v)\n v = re.sub(r\"<[^>]*>\", \"\", v)\n v = re.sub(\"\\u2019\", \"'\", v) # Note: no r\"...\" here!\n v = re.sub(r\" abbr. 
.*\", \"\", v)\n v = re.sub(r\"\\s+\", \" \", v)\n return v.strip()", "def _clean(sentence, subword_option):\n sentence = sentence.strip()\n if subword_option is not None and '@' in subword_option:\n subword_option_0 = subword_option.split('@')[0]\n subword_option_1 = subword_option.split('@')[1]\n else:\n subword_option_0 = None\n subword_option_1 = None\n # BPE\n if subword_option_0 == \"bpe\":\n sentence = re.sub(\"@@ \", \"\", sentence)\n\n # SPM\n elif subword_option_0 == \"spm\":\n sentence = u\"\".join(sentence.split()).replace(u\"\\u2581\", u\" \").lstrip()\n\n # speical for chinese\n if subword_option_1 == 'bpe':\n sentence = re.sub(\"@@ \", \"\", sentence)\n if subword_option_1 == 'space':\n sentence = sentence.replace(\" \", \"\")\n sentence = sentence.replace(\"<SPACE>\",\" \")\n if subword_option_1 == 'char':\n sentence = sentence.replace(\"<SPACE>\", \"\")\n sentence = sentence.replace(\"@@\", \"\")\n sentence = sentence.replace(\" \",\"\")\n sentence = \" \".join(sentence)\n elif subword_option_1 == 'char2char':\n sentence = sentence.replace(\" \", \"\")\n sentence = sentence.replace(\"@@\", \"\")\n sentence = \" \".join(sentence)\n elif subword_option_1 == 'char2word':\n sentence = sentence.replace(\" \", \"\")\n sentence = sentence.replace(\"@@\", \" \")\n # sentence = \" \".join(sentence)\n elif subword_option_1 == 'hybrid':\n sentence = sentence.replace(\" @@ \", \"\")\n sentence = sentence.replace(\"@@ \", \"\")\n sentence = sentence.replace(\" @@\", \"\")\n elif subword_option_1 == 'hybrid2':\n sentence = sentence.replace(\" \", \"\")\n sentence = sentence.replace(\"@@\", \" \")\n return sentence", "def get_string(opt=\"encode\"):\n text = input(f\"Enter string to {opt}: \")\n return text", "def abstract_plaintext(self, include_shortened=False):\n text = ''\n shortened = False\n if self.abstract:\n text = self.abstract\n elif self.description:\n for block in json.loads(self.description)['data']:\n if block.get('type') == 'text':\n data = block['data']\n # Naive string shortener\n if len(data['text']) > settings.ABSTRACT_LENGTH:\n trimmed = data['text'][:settings.ABSTRACT_LENGTH]\n trimmed = trimmed[:trimmed.rindex(' ')]\n text = trimmed\n shortened = True\n else:\n text = data['text']\n break\n if include_shortened:\n return text, shortened\n else:\n return text", "def sanitize(instring):\r\n return instring.encode('ascii','replace')", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def safe_decode_inner(s):\n if isinstance(s, unicode):\n return s\n for encoding in preflist:\n try:\n return s.decode(encoding, 'strict')\n except UnicodeDecodeError:\n if logger is not None:\n logger.warn(\"Assuming %(encoding)r, can't decode %(s)r\",\n locals())\n if errors != 'strict' and preferred:\n return s.decode(preferred, errors)\n raise", "def just_replace_strings_with_dashes(self, artist: str) -> str:\n data = re.sub(' ', '-', artist)\n\n return data", "def strip_other_charcter():\n pass", "def default_label_sanitizer(s: str) -> str:\n\n out: str = unidecode.unidecode(s)\n\n # Remove invalid characters\n out = re.sub(r\"[^0-9a-zA-Z_]\", \"_\", out)\n\n # Remove leading characters until we find a letter or underscore\n out = re.sub(r\"^[^a-zA-Z_]+\", \"_\", out)\n\n return out", "def 
display_unicode(self, string):\n if string is None:\n return ''\n return string.decode(\"utf16\", \"ignore\").encode(\"ascii\", 'backslashreplace')", "def from_str(cls, string):", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def ntriples_unquote(input: str) -> str:\n old_validate = ntriples.validate\n try:\n ntriples.validate = False\n return ntriples.unquote(input)\n finally:\n ntriples.validate = old_validate", "def tidy_string(s: str\n ) -> str:\n s = s.encode('ascii', errors='ignore').decode(FORMAT)\n s = s.replace(\"\\r\", \"\").replace(\"\\t\", \"\").replace('\\n', '') \n return s", "def _convertUselessWords(s, encod='utf-8'): \n if not isinstance(s, unicode):\n s = unicode(s, encod,'replace')\n \n #Remove www.* or https?://* \n s = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','',s) \n #Remove @username\n s = re.sub('@[^\\s]+','',s) \n #Replace #word with word \n s = re.sub(r'#([^\\s]+)', r'\\1', s) \n \n return s", "def parse(self,value):\r\n\t\treturn unicode(value)", "def transcribe(dna):\n str = ''\n dict = {'C': 'C', 'G': 'G', 'A': 'A', 'T': 'U'}\n for char in dna:\n if char == 'C' or char == 'G' or char == 'T' or char == 'A':\n #converting only of the valid string is encountered\n #then the string is converted accordingly\n str = str + dict[char]\n #the case for incalid string, it throws only the error\n else :\n str = 'invalid character entered, please check the input'\n break\n return str", "def __call__(self, s, a=None, d=None):\n if a is None: a = self.allchars\n if d is None: d = self.delchars\n return s.translate(a,d)", "def get_ans_str(self):\n if self.lese_antwort != \"\":\n return self.lese_antwort\n elif isinstance(self.antwort,str):\n return self.antwort\n else:\n return self.antwort[0]", "def unescape_special_areas(data: str):\n return re.sub(r\"[\\ue000-\\ue0ff]\", _restore_from_private_code_plane, data)", "def translate_leet(phrase):", "def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str", "def from_string(string):\n return Output('', magic=string)", "def createAnswer(self, input):\n\n input = input.strip()\n\n if len(input) == 1:\n if input in self.guessedChars:\n return \"Oled juba tähte \" + input + \" pakkunud. Paku midagi muud. \\nHetkel proovitud \" + ' '.join(\n self.guessedChars) + \"\\n\" + self.wordKnown\n else:\n self.addChar(input)\n if self.isWordSet():\n return self.answerIsSet(input)\n else:\n self.filterDict(input)\n if self.isWordSet():\n return self.answerIsSet(input)\n else:\n return \"Kahjuks tähte \" + input + \" sõnas ei ole. Vaja veel \" + str(\n self.wordKnown.count(\"_\")) + \" ära arvata. \\nHetkel proovitud \" + ' '.join(\n self.guessedChars) + \" \\n\" + self.wordKnown\n elif input == \"\":\n return \"Võiks midagi ikka sisestada ka...\\nHetkel proovitud \" + ' '.join(\n self.guessedChars) + \" \\n\" + self.wordKnown\n else:\n if input == \"aitab\":\n self.active = False\n return \"Kui aitab siis aitab. Sõna, mida ma mõtlesin, ma sulle ikkagi ei ütle. 
Jäägu see elu lõpuni \" \\\n \"Sind piinama.\"\n if self.word == input:\n self.active = False\n return \"Arvasid ära, mõtlesin tõesti sõna \" + self.word + \".\"\n else:\n self.removeWordFromDict(input)\n return \"Ei, ma kohe kindlasti ei mõelnud sõna \" + input + \"... Proovi veel. \\nHetkel proovitud \" \\\n \"\" \\\n \"\" \\\n \"\" + ' '.join(self.guessedChars) \\\n + \" \\n\" + self.wordKnown", "def s2s(s):\n if (s is None): return \"\"\n else: return s", "def reconstruct_ngram(self, ngram):\n\n punc_b = ['!', '?', '.', ',', ';', ':', '\\'', ')', ']', '}']\n punc_a = ['(', '[', '}', '$']\n ngram = ' '.join(ngram)\n for p in punc_b:\n ngram = ngram.replace(' '+p, p)\n for p in punc_a:\n ngram = ngram.replace(p+' ', p)\n ngram = re.sub('(^| )BEGQ', ' \"', ngram)\n ngram = re.sub('ENDQ($| )', '\" ', ngram)\n ngram = ngram.replace('DOUBLEDASH', '--')\n return ngram", "def _normalize_linefeeds(a_string):\n newline = re.compile(r'(\\r\\r\\n|\\r\\n|\\n\\r)')\n return newline.sub('\\n', a_string).replace('\\n\\n', '\\n')", "def test_unicode_string(self):\n attributes = [\n (\"key1\", [\"value11\", \"value12\"]),\n (\"key2\", [\"value21\", \"value22\"]),\n ]\n result = asLDIF(\"entry\", attributes)\n self.assertEqual(\n result,\n b\"\"\"\\\ndn: entry\nkey1: value11\nkey1: value12\nkey2: value21\nkey2: value22\n\n\"\"\",\n )", "def FromHumanReadable(self, string: Text):\n pass", "def safe(self, string):\n if sys.version_info.major >= 3 and isinstance(string, bytes):\n string = string.decode('utf8')\n elif sys.version_info.major < 3:\n if not isinstance(string, unicode):\n string = unicode(string, encoding='utf8')\n string = string.replace('\\n', '')\n string = string.replace('\\r', '')\n return string", "def set_alt(self, alt):\n self._set_sub_text('alt', text=str(alt))\n return self", "def ntriples_unquote_validate(input: str) -> str:\n old_validate = ntriples.validate\n try:\n ntriples.validate = True\n return ntriples.unquote(input)\n finally:\n ntriples.validate = old_validate", "def build_from_string(self, obj):\n if self.string_type is unicode and not isinstance(obj, unicode):\n obj = str(obj).decode('utf-8')\n if self.string_type is str and not isinstance(obj, str):\n obj = unicode(obj).encode('utf-8')\n return self.art_type(obj.splitlines())", "def parseString(self, s):\n pass", "def formatengsrt(input, output):\n \n p = ( (r\"-=.*?=-\\s+\", \"\", re.U), #类似 ==http://a.b.c/=- 删除\n (r\"<.*?>\", \"\", re.U), #类似 <...> 删除\n (r\"^[\\s\\d\\-:>,]*[\\r\\n]+\", r\"\", re.M|re.U), #'-'是特殊字符\n (r\"(\\S+)\\s+$\", r\"\\1\", re.M|re.U), #删除结尾的空余字符\n (r\"\\.{3}[\\r\\n]+([a-z])\", r\" \\1\", re.U), #结尾是...的,下一行开始是一个小写字母的。\n (r\"([^\\.?!])[\\r\\n]+\", r\"\\1 \", re.U), #结尾没有 .?!的,添加空格,去掉回车换行\n (r\"(\\w+)[,.?!](\\w)\", r\"\\1, \\2\", re.U), #有些单词后逗号后面没有空格,加上\n )\n\n d = chardet.detect(open(input, \"r\").read())\n print \"[%s] 自动检测为 %s\" %(input, d)\n\n with codecs.open(input, encoding=d['encoding'], mode='r') as fi:\n t = fi.read()\n \n for a, b, c in p:\n t = re.sub(a, b, t, 0, c)\n\n with codecs.open(output, encoding=d['encoding'], mode='w') as fo:\n fo.write(t)\n print \"[{}] compeleted.\".format(output)", "def SafeTranslate(inputstr):\n\n badchars_map = string.maketrans('\\t !#$%&\\'()*+,./:;<=>?@[\\\\]^{|}~',\n '______________________________')\n if isinstance(inputstr, unicode):\n inputstr = inputstr.encode('utf8')\n return inputstr.translate(badchars_map)", "def short_form(self):\n return normalize_pred_string(self.string)", "def _sanitize(opt, value):\n return value if not opt.secret else '*' * 
4", "def none_string(line):\n out_line = None if line.lower() == \"none\" or len(line) == 0 else line\n return out_line" ]
[ "0.5630695", "0.5217001", "0.5198397", "0.5186945", "0.5184185", "0.51392734", "0.51312214", "0.50683737", "0.50631917", "0.5061903", "0.5048459", "0.50467837", "0.50248337", "0.49875286", "0.4931769", "0.49110556", "0.48986942", "0.48867592", "0.48742884", "0.4862726", "0.4840829", "0.48408157", "0.48179346", "0.48179346", "0.4807754", "0.47804108", "0.4769348", "0.47580546", "0.47570357", "0.47521055", "0.47488865", "0.47479954", "0.47427", "0.47245708", "0.47197202", "0.47185", "0.47174945", "0.4713453", "0.4713453", "0.47126257", "0.47013697", "0.46975422", "0.46970376", "0.4689409", "0.46851414", "0.4678995", "0.46771786", "0.46656767", "0.46637493", "0.466232", "0.46613735", "0.4654064", "0.46525377", "0.46500847", "0.46498597", "0.46472493", "0.46466246", "0.46226436", "0.46207792", "0.46156374", "0.46136555", "0.45957088", "0.458548", "0.45730966", "0.4571972", "0.4571972", "0.45664108", "0.45613554", "0.4561026", "0.4557461", "0.45571733", "0.4554218", "0.45492256", "0.4544012", "0.4536249", "0.45247874", "0.45218778", "0.45136297", "0.45111987", "0.4508491", "0.45009872", "0.4500821", "0.44997454", "0.4497894", "0.44909987", "0.44895744", "0.44894132", "0.44879094", "0.44872275", "0.44832966", "0.44764403", "0.44735664", "0.446952", "0.44661698", "0.44603", "0.44592842", "0.44575423", "0.44552678", "0.44537428", "0.44504726" ]
0.5572365
1
Returns None on success, otherwise a string of all the failures
def test(self, values: Dict[str, Any]) -> Optional[str]: reasons = [] for alt in self.alternatives: reason = alt.test(values) if reason is None: return None reasons.append(reason) return " AND ".join(reasons)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_error_message(self):\n msg = 'Test case: ' + self.benchmark + '.yaml + ' + self.producer + '.yaml failed. '\n info = ''\n if not self.directory:\n info = 'No results directory found. The benchmark probably failed'\n elif not self.reports:\n info = 'No results report generated. The results output format is probably wrong'\n elif not self.test_passed:\n info = 'Recorded messages percentage is lower than expected '\n return msg + info", "def format_error (result):\n if check_ok (result):\n return 'exiftool finished probably properly. (\"%s\")' % strip_nl(result)\n else: \n if result is None:\n return \"exiftool operation can't be evaluated: No result given\"\n else:\n return 'exiftool finished with error: \"%s\"' % strip_nl(result)", "def get_failure_message(self, value: T) -> str:\n try:\n return f\"'{value}' does not satisfy '{self.test.__name__}'\"\n except AttributeError:\n return str(value)", "def __str__(self):\n return \"\\n\\n\".join(self.failures)", "def error(self):\n errors = self._info.get('error', {}).get('errors')\n if not errors:\n return None\n return ' '.join(err.get('message', 'unknown') for err in errors)", "def formatFailure(self, test, err):\n return self.formatError(test, err)", "def execute_failure(self, *args, **kwargs):\n return 1, \"\", None", "def text_summary_message(self):\n failed = [e for e in self.evaluations if not e.passes]\n if failed == []:\n return \"SUCCESS - all constraints evaluations pass\"\n else:\n return \"FAILURE: %d constraints evaluations failed\" % len(failed)", "def result_stderr(result):\n return result[1][1]", "def describe(result_code):\n return _MESSAGES.get(result_code) or 'unknown error'", "def error(err):\n\n return str(err) + '\\n'", "def print_errors(res, ctx):\n\n if _has_error_code(res):\n return res.get('msg', '')\n return None", "def error():\n return None", "def collect_errors_and_warnings(self) -> str:\n # Complete error message\n message = \"----------------ERRORS----------------\\n\"\n if self.errors == \"\":\n message = \"YOUR FILE IS VALIDATED!\\n\"\n logger.info(message)\n else:\n for error in self.errors.split(\"\\n\"):\n if error != \"\":\n logger.error(error)\n message += self.errors\n if self.warnings != \"\":\n for warning in self.warnings.split(\"\\n\"):\n if warning != \"\":\n logger.warning(warning)\n message += \"-------------WARNINGS-------------\\n\" + self.warnings\n return message", "def _make_not_found_message(index: Union[int, slice, str]) -> str:\n msg = [f\"Analysis result {index} not found.\"]\n errors = self.errors()\n if errors:\n msg.append(f\"Errors: {errors}\")\n return \"\\n\".join(msg)", "def error_string(self):\n return self._error_string", "def error_message(self):\n summary = format(\"%i out of %s failed unexpectedly:\",\n self.pool.num_failed,\n pluralize(self.pool.num_commands, \"command\"))\n details = \"\\n\".join(\" - %s\" % cmd.error_message for cmd in self.commands)\n return summary + \"\\n\\n\" + details", "def msg(self):\n return self.m_errorMsg", "def failed(self):\n output = self.__call__()\n return output.failed", "def ErrorString(val):\n if val == I2CTransferError.ArbitrationLostError:\n return \"Arbitration lost\"\n elif val == I2CTransferError.NackError:\n return \"Nack error\"\n elif val == I2CTransferError.UnknownError:\n return \"Unknown error\"\n elif val == I2CTransferError.TxunderError:\n return \"TX underrun\"\n elif val == I2CTransferError.Success:\n return \"Success\"", "def complain_result(self) -> Optional[str]:\n 
utils.logger.debug(f\"vote_result({self.complain_votes[self.round].get_summary()})\")\n if self.complain_votes[self.round].is_completed():\n vote_result = self.complain_votes[self.round].get_result()\n return vote_result.hex_hx()\n else:\n return None", "def moveit_error_string(val):\n if val == MoveItErrorCodes.SUCCESS:\n return 'SUCCESS'\n elif val == MoveItErrorCodes.FAILURE:\n return 'FAILURE'\n elif val == MoveItErrorCodes.PLANNING_FAILED:\n return 'PLANNING_FAILED'\n elif val == MoveItErrorCodes.INVALID_MOTION_PLAN:\n return 'INVALID_MOTION_PLAN'\n elif val == MoveItErrorCodes.MOTION_PLAN_INVALIDATED_BY_ENVIRONMENT_CHANGE:\n return 'MOTION_PLAN_INVALIDATED_BY_ENVIRONMENT_CHANGE'\n elif val == MoveItErrorCodes.CONTROL_FAILED:\n return 'CONTROL_FAILED'\n elif val == MoveItErrorCodes.UNABLE_TO_AQUIRE_SENSOR_DATA:\n return 'UNABLE_TO_AQUIRE_SENSOR_DATA'\n elif val == MoveItErrorCodes.TIMED_OUT:\n return 'TIMED_OUT'\n elif val == MoveItErrorCodes.PREEMPTED:\n return 'PREEMPTED'\n elif val == MoveItErrorCodes.START_STATE_IN_COLLISION:\n return 'START_STATE_IN_COLLISION'\n elif val == MoveItErrorCodes.START_STATE_VIOLATES_PATH_CONSTRAINTS:\n return 'START_STATE_VIOLATES_PATH_CONSTRAINTS'\n elif val == MoveItErrorCodes.GOAL_IN_COLLISION:\n return 'GOAL_IN_COLLISION'\n elif val == MoveItErrorCodes.GOAL_VIOLATES_PATH_CONSTRAINTS:\n return 'GOAL_VIOLATES_PATH_CONSTRAINTS'\n elif val == MoveItErrorCodes.GOAL_CONSTRAINTS_VIOLATED:\n return 'GOAL_CONSTRAINTS_VIOLATED'\n elif val == MoveItErrorCodes.INVALID_GROUP_NAME:\n return 'INVALID_GROUP_NAME'\n elif val == MoveItErrorCodes.INVALID_GOAL_CONSTRAINTS:\n return 'INVALID_GOAL_CONSTRAINTS'\n elif val == MoveItErrorCodes.INVALID_ROBOT_STATE:\n return 'INVALID_ROBOT_STATE'\n elif val == MoveItErrorCodes.INVALID_LINK_NAME:\n return 'INVALID_LINK_NAME'\n elif val == MoveItErrorCodes.INVALID_OBJECT_NAME:\n return 'INVALID_OBJECT_NAME'\n elif val == MoveItErrorCodes.FRAME_TRANSFORM_FAILURE:\n return 'FRAME_TRANSFORM_FAILURE'\n elif val == MoveItErrorCodes.COLLISION_CHECKING_UNAVAILABLE:\n return 'COLLISION_CHECKING_UNAVAILABLE'\n elif val == MoveItErrorCodes.ROBOT_STATE_STALE:\n return 'ROBOT_STATE_STALE'\n elif val == MoveItErrorCodes.SENSOR_INFO_STALE:\n return 'SENSOR_INFO_STALE'\n elif val == MoveItErrorCodes.NO_IK_SOLUTION:\n return 'NO_IK_SOLUTION'\n else:\n return 'UNKNOWN_ERROR_CODE'", "def _get_error_text(self, result: dict) -> str:\n try:\n return result[self._FIELD_TEXT]\n except KeyError:\n return self._DEFAULT_ERROR_MSG", "def buildErrorMessage(self, test, err):\n\n errorMessage = \"\"\n errorMessage += test.id()\n errorMessage += \"\\n\\n\"\n\n errorMessage += traceback.format_exc() + \"\\n\"\n return errorMessage", "def __str__(self):\n return \"ERROR: \" + self.error_message", "def __str__(self) -> str:\n return f\"Scrape <Success: {str(not self.failed)}>\"", "def last_failure(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_failure\")", "def get_status_repr(results):\n\n if isinstance(results[0], str) or isinstance(results[1], str):\n return 'No data for making decision.'\n\n if results[0] and results[1]:\n return 'It`s definitely truth.'\n\n if results[0] or results[1]:\n return 'Probably it`s truth.'\n\n return 'Most likely it`s a fake.'", "def print_failure(text):\n\n print(colorize(text, Colors.FAIL))", "def _exc_info_to_string(self, err, test):\n\n info = super(CustomTextTestResult, self)._exc_info_to_string(err, test)\n\n if self.showAll:\n info = 'Test number: {index}\\n{info}'.format(\n index=test.progress_index,\n 
info=re.sub(\"AssertionError:(.*?):\", \"\\nERROR WAS:\\n\", info)\n )\n\n return info", "def summary_str(self):\n if not self.results:\n return self.summary.empty() or ''\n elif self.state == Ok:\n return self.summary.ok(self.results) or ''\n return self.summary.problem(self.results) or ''", "def print_fails(self,result,cause=False,detail=False):\n fails = result.get_fails()\n if fails:\n print ('=== FAILS '+('='*60))\n for fail in fails:\n print (fail.id)\n if cause:\n print (' ',fail.get_cause())\n if detail:\n for key in ['ISQL_stripped_diff','Python_stripped_diff',\n 'ISQL_stderr_stripped_diff',\n 'Python_stderr_stripped_diff']:\n if fail.has_key(key):\n print ('-' * 70)\n print ('%s:' % key)\n print (as_utf8(fail[key]))\n print ()", "def stderr(username, root_wf_id, wf_id, job_id, job_instance_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n text = dashboard.get_stderr(wf_id, job_id, job_instance_id)\n\n if text.stderr_text == None:\n return 'No Standard error for workflow ' + wf_id + ' job-id ' + job_id\n else:\n return '<pre>%s</pre>' % utils.unquote(text.stderr_text)", "def error_msg(self):\n if not self.is_error:\n error = StateOptions.NONE\n else:\n error = self._get_error()\n return self._update_feature(WashDeviceFeatures.ERROR_MSG, error)", "def _get_problem_str(self):\n return ''", "def get_short_errors(self):\n if not self.was_successful():\n for traceback in self.data.traceback.split(\n CaseData.TB_SEPARATOR):\n\n traceback = traceback.strip(\" \\n\")\n bottom_line = traceback.rsplit(\"\\n\", 1)[-1].strip()\n yield \"{}: {}\".format(self.data.name, bottom_line)", "def bot_failed_comprehension(error_message=None):\n result = \"\"\n\n if error_message:\n result += error_message + \"\\n\"\n\n result += \"Please see [here]\"\n result += \"(https://www.reddit.com/r/NHL_Stats/comments/74skjv/bot_details/do0tjzz/) \"\n result += \"for tips on proper usage.\\n\\n\"\n return result", "def errback(result):\n append(result)\n return None", "def repr_failure(self, excinfo):\n if isinstance(excinfo.value, NbCellError):\n msg_items = [bcolors.FAIL + \"Notebook cell execution failed\" + bcolors.ENDC]\n formatstring = bcolors.OKBLUE + \"Cell %d: %s\\n\\n\" + \\\n \"Input:\\n\" + bcolors.ENDC + \"%s\\n\\n\" + \\\n bcolors.OKBLUE + \"Traceback:%s\" + bcolors.ENDC\n msg_items.append(formatstring % excinfo.value.args)\n return \"\\n\".join(msg_items)\n else:\n return \"pytest plugin exception: %s\" % str(excinfo.value)", "def error_str(rc):\n\treturn '{}: {}'.format(rc, mqtt.error_string(rc))", "def failure_cmd(self) -> str:\n return \"{} --enable=all -f -q {}\".format(\n self.conf.get_executable(), constants.ROOT_PATH + \"/data/cppcheck-152/trial-fail.cpp\"\n )", "def _results_debug_message(self):\n result = 'bisector.lkgr: %r\\n' % self.lkgr\n result += 'bisector.fkbr: %r\\n\\n' % self.fkbr\n result += self._revision_value_table()\n if (self.lkgr and self.lkgr.values and self.fkbr and self.fkbr.values):\n result += '\\n' + self._t_test_results()\n return result", "def error_message(message, output=None, desc=None, command=None):\n global test_name_text\n errmsg = \"\"\n errmsg += \"FAIL {}: {}\\n\".format(test_name_text, message)\n errmsg += \" dsc={}\\n\".format(desc if desc else get_description())\n errmsg += \" cmd={}\\n\".format(command if command else get_command())\n if output:\n errmsg += \"output==========================\\n\"\n errmsg += output\n errmsg += \"\\n================================\\n\"\n return errmsg", "def print_failure(msg):\n\n 
tf.print(BColors.FAIL + msg + BColors.ENDC, output_stream=sys.stderr)\n sys.exit(1)", "def __repr__(self) -> str:\n return f\"Scrape <Success: {str(not self.failed)}>\"", "def test_no_errors(self):\n test_error = \"\\r\\n--------------------------------------------------------------------\\r\\n\"\\\n \"Your code has been rated at 10.00/10 (previous run: 9.33/10, +0.67)\"\n\n self.assertEqual(\n format_errors(test_error),\n None\n )", "def _message_failed_job(self):\n self.ensure_one()\n return _(\"Something bad happened during the execution of the job. \"\n \"More details in the 'Exception Information' section.\")", "def get_error_message(self):\n try:\n msg = self.failed_restrictions[0].get_error_message(\n is_api=self.request and self.request.is_api\n )\n except IndexError:\n msg = None\n return msg", "def result_display(result):\n raise TryNext", "def error_str(rc):\n return f\"{rc}: {mqtt.error_string(rc)}\"", "def identify_result_error(self, record):\n return [\"error\"]", "def get_error(self) -> List[str]:\n return []", "def get_error(self) -> List[str]:\n return []", "def getFailure(self, *args):\n return _libsbml.SBMLValidator_getFailure(self, *args)", "def error_str(rc):\n return '{}: {}'.format(rc, mqtt.error_string(rc))", "def stderr(self):\n if self._uuid is None:\n return \"\"\n resp = self._connection._get(\n get_url('task stderr', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)\n return resp.text", "def get_message(self):\n if self.lineno:\n return \"Compilation error at line %d: %s\" % (self.lineno, self.details)\n else:\n return \"Compilation error: \" + self.details", "def error_to_text(ex):\n\tif isinstance(ex, FailedProcessError) and ex.args[0] == 'youtube-dl' and ex.exitcode == 1:\n\t\treturn 'Download error: {}'.format(ex.stderr)\n\treturn \"Internal error {}: {}\".format(type(ex).__name__, ex)", "def format_fail(self, *args):\n if self._pretty:\n return self.format_multiline_message(*args, color='red', start='[FAIL] ', multiline=' ~~ ')\n return self.format_multiline_message(*args)", "def __str__(self) -> str:\r\n return f'{self.error_code} ---> {self.message}'", "def test_install_error_message(self):\n\n fail_msg = \"Failure message\"\n\n fail_file = Path(self.dockerfile_dirpath) / \"matlab-install\" / \"FAIL\"\n\n with open(str(fail_file), \"w\") as ff:\n ff.write(fail_msg + \"\\n\")\n self.addCleanup(utils.remove_file, fail_file)\n\n build_msg = utils.get_build_output(\n docker_api_client=self.client.api,\n dockerfile_dirpath=self.dockerfile_dirpath,\n release=\"latest\",\n )\n\n self.assertTrue(any([fail_msg in msg for msg in build_msg]))", "def fresh_stderr(self):\n if self._uuid is None:\n return \"\"\n resp = self._connection._post(\n get_url('task stderr', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)\n return resp.text", "def cmd_error_check(self, cmd_out):\n for err in self.err_strings:\n if re.search('\\\\b%s\\\\b' % (err), cmd_out, re.I):\n _log.info(cmd_out)\n _log.info(\n \"Cmd execution failed! 
with this Return Error: \\n%s\" % (\n cmd_out))\n return 0", "def print_failed(self):\n # Construct the message dynamically based on the instance_type\n msg = colored(\"FAIL\", \"red\") + f\" | [ERROR] {self.message}\"\n if self.instance_type == \"FILE\":\n msg += f\" [{self.instance_type}] {self.instance_location}/{self.instance_name}\"\n\n elif self.instance_type == \"HOST\":\n msg += f\" [{self.instance_type}] {self.instance_hostname}\"\n\n msg += f\" [PROPERTY] {':'.join(str(item) for item in self.absolute_path)}\"\n\n # print the msg\n print(msg)", "def not_string_error(name, yml):\n\n yml = symlink_target(yml)\n output_1 = path(yml) + '\\n'\n output_2 = colored(' - Error: ', 'red')\n output_3 = colored(name, attrs=['bold'])\n output_4 = colored(' type should be ', 'red')\n output_5 = colored('str', 'yellow')\n return output_1 + output_2 + output_3 + output_4 + output_5", "def get_error_state(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .(.) .*? .*? .*? .*? .*? . .*? .*? . . . .*?'\n error = re.findall(pattern,summary).pop()\n if error == '-':\n msg = 'No Error'\n elif error == 'V':\n msg = 'Battery over voltage'\n elif error == 'v':\n msg = 'Battery under voltage'\n elif error == 'I':\n msg = 'Battery over current'\n elif error == 'C':\n msg = 'Battery max cell over voltage'\n elif error == 'c':\n msg = 'Battery min cell under voltage'\n elif error == 'x':\n msg = 'Battery min cell under fault voltage (2.0V)'\n elif error == 'T':\n msg = 'Battery over temperature'\n elif error == 'W':\n msg = 'Battery moisture intrusion detected by H2O sensors'\n elif error == 'H' or error == 'h':\n msg = 'Battery internal hardware fault'\n elif error == 'm':\n msg = 'Battery watchdog timeout'\n return error,msg", "def composeTestingSummaryEmail(self):\r\n brokenPlatforms = 0\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n brokenPlatforms = brokenPlatforms + 1\r\n\r\n if brokenPlatforms == 0:\r\n return None;\r\n \r\n message = \"\"\"From: Douglas Gregor <[email protected]>\r\nTo: [email protected]\r\nReply-To: [email protected]\r\nSubject: [Report] \"\"\"\r\n message += str(brokenPlatforms) + \" potentially broken platforms on \" + branch\r\n if branch != 'trunk':\r\n message += ' branch'\r\n message += \" (\" + str(datetime.date.today()) + \")\"\r\n message += \"\"\"\r\n\r\nPotentially broken platforms for Boost regression testing\r\n\"\"\"\r\n message += \"Report time: \" + self.date + \"\"\"\r\n\r\nThis report lists the high-priority platforms that are exhibiting a\r\nlarge number of regression test failures, which might indicate a problem\r\nwith the test machines or testing harness.\r\n\r\nDetailed report:\r\n\"\"\"\r\n\r\n message += ' ' + self.url + '\\n'\r\n\r\n message += \"\"\"\r\nPlatforms with a large number of failures:\r\n\"\"\"\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n message += (' ' + platform + ' ('\r\n + str(len(self.platforms[platform].failures))\r\n + ' failures)\\n')\r\n\r\n return message", "def fail_with(s):\n print \"[FAILURE] %s\" % s\n sys.exit(1)", "def test_update_enforcement_mode_command_failure_human_readable(\n enforcement_mode_failure_hr, enforcement_mode_failure_expected\n):\n resp = prepare_update_enforcement_mode_output(enforcement_mode_failure_expected)\n\n assert resp == enforcement_mode_failure_hr", "def print_failures(failures):\n if failures:\n print(\"\\n({}) Failure{}:\".format(len(failures),\n \"s\" if len(failures) 
!= 1 else \"\"))\n for f in failures:\n print(\"[{}:{}] In {}: {}\".format(\n f.filename, f.lineno, f.case, f.data),\n end='')\n print(\" (\\\"{}\\\")\".format(f.alt) if f.alt else \"\")\n print()", "def getLastError(self):\n errors = self.getErrorsList()\n if (len(errors) > 0):\n return errors[len(errors) - 1]\n return None;", "def repr_failure(self, excinfo):\n if excinfo.errisinstance(MypyError):\n return excinfo.value.args[0]\n return super().repr_failure(excinfo)", "def func_case(self):\n test.success(\"\")", "def _sc_print_ ( sc ) :\n from Bender.Logger import colored_string \n if sc.isSuccess () : return colored_string( 'SUCCESS' , WHITE , GREEN , True ) \n elif sc.isRecoverable () : return colored_string( 'RECOVERABLE' , RED , YELLOW , True ) \n elif 0 != sc.getCode () :\n return colored_string('FAILURE[%d]' % sc.getCode() , YELLOW , RED , True ) \n return colored_string('FAILURE' , YELLOW , RED , True )", "def _identify_fail(failure):\n logger.warning(failure.getErrorMessage())\n logger.warning(\"Failed to setup & obtain identity\")\n return", "def errors(self) -> str:\n return self.job_errors() + self.analysis_errors()", "def failure(self, error):\n print \"comm failed Reason:\", error\n return error", "def error_msg(self) -> str:\n return self.__error_msg", "def expected_failure(self) -> int:\n return 139", "def __str__(self):\n base_message = self.base_message.format(filename=self.yaml_file_path)\n error_message = ERROR_MESSAGE.format(key=self.key, expected=self.expected)\n return base_message + error_message", "def obj_successfail(succeeded):\n if succeeded:\n return \"<span class='objective success'>Success</span>\"\n else:\n return \"<span class='objective failure'>Failure</span>\"", "def print_failed(failed: list, spaces: int = 8) -> str:\n lines = []\n for key, val in sorted(failed.items()):\n if key != 'failed-cutoff':\n lines.append(f'{spaces * \" \"}{key.replace(\"-\", \" \").title()}: {len(val)}')\n return \"\\n\".join(lines) if lines else \"\"", "def message_content(connections_result_passed, connections_result_failed):\n color = 'good'\n message_list = []\n correct = u'\\u2714'\n cross = u'\\u2716'\n\n if len(connections_result_passed + connections_result_failed) == 0:\n message = 'No hosts mentioned in the Endpoints.yaml file'\n else:\n if len(connections_result_failed) > 0:\n color = 'danger'\n failed_content = construct_message(connections_result_failed, cross)\n pass_content = construct_message(connections_result_passed, correct)\n message_list.append(\"\\n\")\n message = ''.join(failed_content + pass_content)\n print(message)\n return message, color", "def fail():\n sys.stdout.write('%s[ fail ]%s\\n' % (colors.RED, colors.RESET))", "def errorString(node, error):\n\n back, fore = error[0], error[1]\n\n if len(back) == 0:\n back = 'None'\n\n if len(fore) == 0:\n fore = 'None'\n \n return ('Node {0}: missing backlinks {1},'\n ' missing forelinks {2}').format(node, back, fore)", "def __repr__(self: \"Failed\") -> str:\n return \"Failed()\"", "def parsed_error_msg(self):\r\n return self.error_msg", "def test_to_json_with_non_result(self):\n actual_result = ResultBuilder(None,\n ERROR_MESSAGE,\n ERROR_RETURN_CODE).build_json()\n self.assertMultiLineEqual(actual_result, EXPECTED_OUTPUT_BUILDER_ERROR)", "def indicate_failure(self):\n pass", "def get_massage_fail(self):\r\n return self.driver.find_element(*SinginPage.massagefail).text", "def get_error_message(self, data, response=None):\n return str(data)", "def passed(test: bool) -> str:\n return 'passed' if test else 
'failed'", "def last_failure(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_failure\")", "def test_print_result(capsys):\n assert \"\"\"Total 5 hands solved\nTotal 4 hands solved with hint\nTotal 4 hands failed to solve\"\"\" in hl.test_help_print_result(capsys)", "def test_display_failed():\n cmd_list = [NETMIKO_GREP] + ['interface', 'all']\n (output, std_err) = subprocess_handler(cmd_list)\n assert \"Failed devices\" in output\n failed_devices = output.split(\"Failed devices:\")[1]\n failed_devices = failed_devices.strip().split(\"\\n\")\n failed_devices = [x.strip() for x in failed_devices]\n assert len(failed_devices) == 2\n assert \"bad_device\" in failed_devices\n assert \"bad_port\" in failed_devices", "def failure_detail(self) -> Optional[pulumi.Input['FailureDetailArgs']]:\n return pulumi.get(self, \"failure_detail\")", "def print_failure_msg(msg):\n click.secho(msg, fg='red', file=sys.stderr)", "def result_summary(self):\r\n summary = ['Ran %d commands to test %d scripts. %d of these commands '\r\n 'failed and %d scripts could not be tested due to errors.' %\r\n (self.total_commands, self.total_scripts,\r\n self._num_failures(), self._num_script_errors())]\r\n\r\n if self._num_failures() > 0:\r\n summary.append('Failed scripts were: %s' %\r\n ' '.join(self._failed_scripts()))\r\n\r\n for error_info in self.script_errors.values():\r\n if len(error_info[0]) > 0:\r\n summary.append(self._format_script_error_summary(\r\n error_info[0], error_info[1]))\r\n\r\n if self.warnings:\r\n summary.append('Warnings:')\r\n for warning in self.warnings:\r\n summary.append(' ' + warning)\r\n\r\n return '\\n'.join(summary)", "def displayText(self, value, locale=None):\n if value is None:\n return \"\"\n\n value = value.toPyObject()\n\n if isinstance(value, Exception):\n result = \"Error\"\n else:\n results = value.xml, value.profile, value.best_practices\n invalid = any(getattr(x, 'is_valid', None) is False for x in results)\n result = \"Invalid\" if invalid else \"Valid\"\n\n return super(ResultsDelegate, self).displayText(result, locale)", "def get_error_description(self, code):\n self.c.execute(\"SELECT * FROM errorcode WHERE code=%d\" % code)\n return self.c.fetchone()[1]", "def outcome_string(outcome):\r\n return WorkUnit.choose_for_outcome(outcome, 'ABORTED', 'FAILURE', 'WARNING', 'SUCCESS', 'UNKNOWN')" ]
[ "0.7311255", "0.6805091", "0.6787462", "0.67572325", "0.66535556", "0.66410094", "0.66374403", "0.653866", "0.6504157", "0.64584297", "0.64362466", "0.6418307", "0.63620204", "0.6343835", "0.62825376", "0.62660265", "0.62512654", "0.6249964", "0.6241896", "0.6228709", "0.62242913", "0.6224097", "0.62056077", "0.6170042", "0.61671", "0.6158066", "0.6147229", "0.61421126", "0.61238045", "0.61225593", "0.611505", "0.6112711", "0.61103463", "0.6098914", "0.60980767", "0.60956395", "0.60847175", "0.60832506", "0.60791546", "0.6071663", "0.60371643", "0.6036951", "0.6021093", "0.60165584", "0.60142493", "0.60139555", "0.6001191", "0.60002685", "0.59697187", "0.59586626", "0.595356", "0.5951907", "0.5951907", "0.5948645", "0.5948158", "0.59476423", "0.5941875", "0.5940259", "0.59382457", "0.59368056", "0.5934446", "0.5932472", "0.59319216", "0.5930244", "0.5927807", "0.5920402", "0.5919981", "0.59063053", "0.58947563", "0.589075", "0.58893555", "0.58890235", "0.5869722", "0.5868283", "0.58585197", "0.5855829", "0.58514434", "0.58440113", "0.58382833", "0.58343226", "0.5831053", "0.5827871", "0.5820629", "0.5809268", "0.58059776", "0.5800819", "0.579867", "0.57930005", "0.57827103", "0.5782361", "0.57781434", "0.57737404", "0.5773648", "0.57711905", "0.57598263", "0.5759822", "0.5757537", "0.5753946", "0.5752895", "0.5731761", "0.57267374" ]
0.0
-1
Pull a Restriction from encoded string, return remainder
def decode(cls, encstr: str) -> Tuple['Restriction', str]: alts = [] while len(encstr) != 0: if encstr.startswith('&'): encstr = encstr[1:] break alt, encstr = Alternative.decode(encstr) alts.append(alt) return cls(alts), encstr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_str(cls, encstr: str) -> 'Restriction':\n encstr = re.sub(r'\\s+', '', encstr)\n ret, remainder = cls.decode(encstr)\n if len(remainder) != 0:\n raise ValueError(\"Restriction had extrs characters at end: {}\"\n .format(remainder))\n return ret", "def parse_mask(string):\n return string.split(' = ')[1]", "def process_restriction(restriction):\n if not restriction:\n return ''\n else:\n res = restriction.lower()\n if res == 'eaff':\n return '-eAFF'\n elif res == 'ey2h':\n return '-eY2H'\n elif res == 'y2h':\n return '-rY2H'\n elif res == 'aff':\n return '-rAFF'\n else:\n raise network_generation.IncorrectRestrictionType(res)", "def decode(self, s):", "def decode(self, s):", "def extract_critic_input(self, data):\n return data[1]", "def decode_match(s):\n if '=' in s:\n raise TypeError\n while True:\n try:\n bin = standard_b64decode(s)\n except TypeError, e:\n if str(e) != 'Incorrect padding':\n raise\n s += '='\n else:\n break\n return MatchProxy(bin)", "def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])", "def decode(cls, encstr: str) -> Tuple['Alternative', str]:\n cond = None\n end_off = 0\n\n # Swallow field up to conditiona\n while end_off < len(encstr):\n if encstr[end_off] in string.punctuation:\n cond = encstr[end_off]\n break\n end_off += 1\n if cond is None:\n raise ValueError('{} does not contain any operator'\n .format(encstr))\n field = encstr[:end_off]\n end_off += 1\n\n value = ''\n while end_off < len(encstr):\n if encstr[end_off] == '|':\n # We swallow this\n end_off += 1\n break\n if encstr[end_off] == '&':\n break\n if encstr[end_off] == '\\\\':\n end_off += 1\n value += encstr[end_off]\n end_off += 1\n\n return cls(field, cond, value), encstr[end_off:]", "def decode_string(self, value):\r\n return value", "def decode_extra_field(self, string):\n\n if isinstance(string, str):\n try:\n decode = int(string)\n except ValueError:\n return string\n return decode\n else:\n return string", "def minisat_decode(clause_str):\n factor = ClauseVariable.encoding_factor()\n int_value = int(clause_str)\n compliment = (int_value < 0)\n int_value = abs(int_value)\n position = (int_value % factor) -1\n vertex = math.ceil(int_value/factor)-1\n return ClauseVariable(compliment,vertex,position)", "def string_bits(myStr):\n\n other = myStr[::2] \n \n return other", "def match(code, x):\n return decode(code)(x)", "def _mb_substr(string, start, length):\n return string.decode(_ENCODING)[start: start + length]", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n if end != len(s):\n raise ValueError(errmsg(\"Extra data\", s, end, len(s)))\n return obj", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n if end != len(s):\n raise ValueError(errmsg(\"Extra data\", s, end, len(s)))\n return obj", "def decode_result(found):\n ...", "def decode_network_string(msgtype, plen, buf):\n return buf[header.size:plen - 1]", "def auth_sub_string_from_body(http_body):\n for response_line in http_body.splitlines():\n if response_line.startswith('Token='):\n # Strip off Token= and return the token value string.\n return response_line[6:]\n return None", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n\n return obj", "def extract_string(line, idx, result):\n\n begin = line.find(resource_string_prefix, idx)\n if begin == 
-1:\n return -1\n \n begin = begin + len(resource_string_prefix)\n end = -1\n for i in range(begin, len(line)):\n if not is_valid_char(line[i]):\n end = i\n break\n\n result.add(line[begin:end])\n return end", "def decode(self, encoded):", "def extract(string, start_marker, end_marker):\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]", "def decode(self, byteString):\n decoded = ''\n portion_left = byteString\n while len(portion_left) > 0:\n substr_len = 1\n symbol = None\n while (symbol == None) and (substr_len <= len(portion_left)):\n symbol = self.decode_symbol(portion_left[:substr_len])\n substr_len += 1\n\n if symbol == None:\n print \"decode failed:\"\n print \"decoded: \" + decoded\n print \"left: \" + portion_left\n return None\n\n decoded += symbol\n #print \"decoded: _\" + symbol + \"_\"\n portion_left = portion_left[substr_len-1:]\n\n return decoded", "def get_strInstructions(json):\n\n strInstructions = introcs.find_str(json,'\"strInstructions\"')\n\n\n string = json[strInstructions+17:]\n\n\n result = first_inside_quotes(string)\n\n\n return result", "def parse_special_word(s):\n index1 = s.find(special_word_marker)\n if index1 != -1:\n index2 = s.find(special_word_marker, index1 + 1)\n if index2 != -1:\n sw = normalize(s[index1+len(special_word_marker) : index2])\n rest = normalize(s[index2+len(special_word_marker) :])\n return sw, rest\n return None, s", "def _DecodeAccidentalString(cls, sAccidental):\n sAcc = sAccidental.strip()\n # Strip non-accidental content\n index = 0\n for n in range(len(sAcc)):\n if (sAcc[n] not in cls.lilyFlat) and (sAcc[n] not in cls.lilySharp):\n index = n\n break\n sAcc = sAcc[:index]\n encFlat = cls.encodingAccidentals.get(cls.lilyFlat, None)\n encSharp = cls.encodingAccidentals.get(cls.lilySharp, None)\n if cls._reFlat.match(sAcc):\n return encFlat*len(sAcc)//2\n elif cls._reSharp.match(sAcc):\n return encSharp*len(sAcc)//2\n else:\n return 0", "def selector(string,key,lkey,lval):\n print string\n ip = string.find(key)\n print 'key =',key, 'position =',ip\n if ip > -1:\n value = string[ip+lkey:ip+lkey+lval]\n print 'velue = ',value\n else:\n value = 'none'\n \n return value", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def extract_string(begin, end, string):\n b = string.find(begin) + len(begin)\n e = string.find(end, b)\n\n return string[b:e]", "def get_access(x):\n pat = r\"access\\s*([\\w\\s]*)\"\n match = re.search(pat, str(x), flags=re.IGNORECASE)\n\n if match:\n return match.group(1)\n else:\n return \"Boom Lift\"", "def imaputf7decode(s):\n lst = s.split('&')\n out = lst[0]\n for e in lst[1:]:\n u, a = e.split('-', 1) #u: utf16 between & and 1st -, a: ASCII chars folowing it\n if u == '':\n out += '&'\n else:\n out += b64padanddecode(u)\n out += a\n return out", "def decode(data): #@NoSelf", "def get_mask_from_alignment(al):\n alignment_str = str(al).split(\"\\n\")[1]\n return alignment_str.replace(\"|\", \"+\")", "def find_start_of_can_message(input_str, can_msg):\n iterator = 0\n is_start_of_can_message_found = False\n for char in input_str:\n iterator += 1\n if char == 'W' or char == 'S':\n input_str = input_str[iterator::]\n is_start_of_can_message_found = True\n if char == 'W':\n can_msg.is_extended = 1\n else:\n can_msg.is_extended = 0\n break\n if is_start_of_can_message_found is 
True:\n return input_str\n else:\n return []", "def get_substring(self, start, end):\n return self.input[start:end]", "def interpret_requirement(string):\n string_list = split(string, sep=' ')\n \n requirement = Requirement(points, degree, majors, levels, max_non_degree)\n return requirement", "def ld8_extract(self, text):\n return re.search('\\d{5}_\\d{8}', text).group(0)", "def decode(str):\n s6 = re.sub('6','\\n',str)\n s5 = re.sub('5','44',s6)\n s4 = re.sub('4','33',s5)\n s3 = re.sub('3','22',s4)\n return re.sub('2',' ',s3)", "def decode_email(email):\n return", "def _get_string_from_packing(self, string_to_unpack):\n return string_to_unpack[4:]", "def decode(self, x):\n return x", "def extract_literal_string(memory,address,ztext):\n zchar_start_address = address\n text, next_address = ztext.to_ascii(memory,zchar_start_address,0) \n return next_address,text", "def test_percent_decode(self):\n new_case = uri.URI.parse_uri('https://www.google.com/search?aqs=chrome..69i57j0l3.9438j0&sourceid=chrome&Ladies%20+%20Gentlemen&q=setter+python&ie=UTF-8&oq=setter+python')\n self.assertEqual(new_case.get_query_arg('Ladies + Gentlemen'), None)", "def parse_policy_line(line):\n match = policy_pattern.match(line)\n lower = int(match.group('min'))\n upper = int(match.group('max'))\n assert upper >= lower\n letter = match.group('letter')\n assert len(letter) == 1\n password = match.group('password')\n assert len(password) >= 1\n return lower, upper, letter, password", "def decode_position(s):\n if '=' in s:\n raise TypeError\n while True:\n try:\n bin = standard_b64decode(s)\n except TypeError, e:\n if str(e) != 'Incorrect padding':\n raise\n s += '='\n else:\n break\n r = twoside_decode(bin)\n if not len(r) == 2 or not len(r[0])==25 or not len(r[1])==25:\n raise bglib.encoding.DecodeError('got bad data: %s '%(s,))\n return r", "def decode(self, value):\r\n return value", "def parse_creative_serving_decision(data):\n return json.loads(base64.b64decode(data))", "def fetch_address(cpr: str) -> str:\n\n return \"Åbogade 15, 8200 Aarhus N\"", "def decode(str):\r\n\tstr = str.translate(decode_translation)\r\n\tresult = 0\r\n\r\n\tfor c in str:\r\n\t\tresult = result * keyspace_len + keyspace.index(c)\r\n\treturn result", "def test_decode(self):\n assert url_encoder.decode('TheStakeOut') == 1\n assert url_encoder.decode('TheStockTip-TheSeven') == 800\n assert url_encoder.decode('MaleUnbonding-TheConversion-TheAndreaDoria') == 99999", "def extract(self, str):\n\n ips = re.match( r'^[0-9]+(?:\\.[0-9]+){3}', str)\n\n if ips:\n return ips.group(0)", "def get_masked_string(s, p):\r\n return (fromstring(s, dtype=uint8))[p].tostring()", "def get_masked_string(s, p):\n return (fromstring(s,dtype=uint8))[p].tostring()", "def LegalIn(f):\n return search(field='legalities', method=HASKEY, value=f.lower())", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def extract_critic_conditioning(self, data):\n return data[0]", "def process_address(text):\n return sanitize(text[9:])", "def decode(string: str) -> str:\n a = re.sub(r\"/\\+/g\", ' ', unquote(string))\n return a", "def get_year(string): \n return int(string[11:15])", "def selector_post(string,key,lkey,lval,sel_set):\n print string\n ip = string.find(key)\n print 'key =',key, 'position =',ip\n if ip > -1:\n value = string[ip-lval:ip]\n print 'value = ',value", "def maybe_urlencoded(fixed_in: str) -> str:\n try:\n d = 
urllib.parse.parse_qs(fixed_in)\n # There may be additional known-good keys in the future.\n return d[\"fixed\"][0]\n except (ValueError, KeyError):\n return fixed_in", "def decode_letter(self, received_letter):\n syndrome = (np.matmul(received_letter, self.parity_check)) % 2\n\n try:\n error_vector = self.syndrome_table[tuple(syndrome)]\n except KeyError:\n error_vector = random.choice(list(self.syndrome_table.values()))\n letter_vector = (error_vector + received_letter) % 2\n\n try:\n return self.code_words[tuple(letter_vector)]\n except KeyError:\n\n return random.choice(list(self.code_words.values()))", "def get_params(raw):\n parts = raw.split(\" \", 1)\n return None if len(parts) == 1 else parts[1]", "def decode(text, password):\r\n\tstep_index = 0\r\n\tdecoded_text = ''\r\n\tfor letter in text:\r\n\t\tdecoded_text += prev_letter(letter, to_int(password[step_index]))\r\n\t\tstep_index += 1\r\n\t\tif step_index > len(password)-1:\r\n\t\t\tstep_index = 0\r\n\treturn decoded_text", "def extract_sub(s: str):\n subject = re.search(r'sub-\\d+', s)[0]\n return subject", "def decode_base64(in_str):\n import base64\n return base64.decodestring(in_str)", "def key_f(f):\n regex_res = re.search(r'(?<=Magnetization\\-)[0-9]+(?=\\-)', f).group(0)\n return regex_res", "def decode(id_string, alphabet=ALPHABET):\n alphabet_len = len(alphabet) # Cache\n return sum([alphabet.index(char) * pow(alphabet_len, power) for power, char in enumerate(reversed(id_string))])", "def decode(self, encoded: str):\n if not isinstance(encoded, str) or not encoded:\n return None\n int_encoded = self._decode_str(encoded)\n if int_encoded is None:\n return None\n int_origin = self._int_obfuscator.decode(int_encoded)\n if int_origin is None:\n return None\n str_encoded = self.__encode(int_origin)\n return int_origin if str_encoded == encoded else None", "def _match_version_string(self, subject):\n if self.allowed_versions is None:\n self.allowed_versions = self._get_allowed_versions()\n if subject in self.allowed_versions:\n return self.allowed_versions[subject]\n else:\n raise ValueError()", "def decode(self, s):\n o = self._decoder.decode(s)\n return o", "def _decode_search_response(self, resp: ImapSearchResponseType) -> str:\n _, data = resp\n return data[0].decode()", "def __call__(self, string):\n import jieba\n str_list = list(jieba.cut(string, cut_all = False))\n return self.tbl.most_likely(str_list)", "def decode(a):\n return decode(a)", "def decode_syn(msg):\n\n cut = msg[4:] # Omit the first 4 chars ('SYN;')\n spl = cut.split(';')\n prime = int(spl[0])\n base = int(spl[1])\n a_public = int(spl[2])\n return prime, base, a_public", "def _match_start_get_remaining(self, start, text):\n if not text.startswith(start):\n return\n return text[len(start):]", "def decode_encoded_string_value(byte_iter):\n try:\n # First try \"Value-length Char-set Text-string\"\n value_length = wsp_pdu.Decoder.decode_value_length(byte_iter)\n # TODO: add proper support for charsets...\n try:\n charset = wsp_pdu.Decoder.decode_well_known_charset(byte_iter)\n except wsp_pdu.DecodeError, msg:\n raise Exception('encoded_string_value decoding error - '\n 'Could not decode Charset value: %s' % msg)\n\n return wsp_pdu.Decoder.decode_text_string(byte_iter)\n except wsp_pdu.DecodeError:\n # Fall back on just \"Text-string\"\n return wsp_pdu.Decoder.decode_text_string(byte_iter)", "def dummy_junction14():\n return \"junction:chr1:176-324:+\"", "def decode_secret(secret, encoding=SecretEncoding.BASE32):\n return _decoding_map[encoding](secret)", 
"def decode(k, key_length):\n key = k[:key_length]\n val_length, ber_length = decode_ber(k[key_length:])\n value = k[key_length + ber_length : key_length + ber_length + val_length]\n return key, value", "def _find_specie_in_str(specie, equation_str):\n check_length = len(specie) + 1\n found_specie = ' %s ' % specie in equation_str \\\n or equation_str[:check_length] == '%s ' % specie \\\n or equation_str[-check_length:] == ' %s' % specie \\\n\n return found_specie", "def decode(s):\n start = 0\n multiplier = 1\n for char in s[::-1]:\n start += multiplier * LETTERS.index(char)\n multiplier = multiplier * 58\n return start", "def read(string):\n\treturn (re.finditer('(?<=\\[)[a-z]+(?=\\])', string), re.finditer('(?<=\\])[a-z]+|[a-z]+(?=\\[)', string))", "def decode_from_value(byte_iter):\n value_length = wsp_pdu.Decoder.decode_value_length(byte_iter)\n # See what token we have\n byte = byte_iter.next()\n if byte == 129: # Insert-address-token\n return '<not inserted>'\n\n return MMSDecoder.decode_encoded_string_value(byte_iter)", "def urldecode(val):\n return urllib2.unquote(val)", "def decode_base64(in_str):\n return base64.decodestring(in_str)", "def decode(self, shortUrl):\n shortUrl = shortUrl[-6:]\n if shortUrl in self.short_to_long:\n return self.short_to_long[shortUrl]", "def decode(self, text):\r\n\r\n decoded = \"\".join([self.chars[int(x)] for x in text if x > -1])\r\n decoded = self.remove_tokens(decoded)\r\n decoded = pp.text_standardize(decoded)\r\n\r\n return decoded", "def dissect(self, text):", "def decode_field(field):\r\n field = field.replace('\\r\\n','')\r\n field = field.replace('\\n','')\r\n\r\n list = email.Header.decode_header (field)\r\n\r\n decoded = \" \".join([\"%s\" % k for (k,v) in list])\r\n\r\n #print \"Decoding [%s] to [%s]\" % (field, decoded)\r\n\r\n return decoded", "def decode():\n result = \"\"\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n shift = int(input(\"select a number to decode your message\"))\n inverted = alphabet[shift:] + alphabet[:shift]\n message = input(\"please enter a message to decode\")\n for y in message:\n result += alphabet[inverted.index(y)]\n print(result)", "def decode(encoded_word):\n decoded = []\n for i, letter in enumerate(encoded_word):\n if letter.isdigit():\n decoded.append(int(letter) * encoded_word[i + 1])\n elif not encoded_word[i - 1].isdigit():\n decoded.append(letter)\n\n decoded_word = ''.join(decoded)\n return decoded_word", "def __parse__(self, filter):\n \n if filter == 'zipcode':\n # Return 5 digit zip or, if applicable, Concatenate 5 digit and \n # 4 digit zipcode\n if self.data['Mailing Zip 4']:\n return \"%s-%s\" %(str(self.data['Mailing Zip Code'])[:-2],\n str(self.data['Mailing Zip 4'])[:-2]\n )\n else:\n return str(self.data['Mailing Zip Code'])[:-2]\n elif filter == 'employee_count':\n # Convert employee count string to digit\n pattern = '.+to\\s([0-9]+)'\n try:\n return re.findall(\n pattern, self.data['Location Employee Size Range'])[0]\n except IndexError:\n pass\n elif filter == 'phone':\n # Regex phone number digits and concatenate\n number = ''.join(re.findall('[0-9]+', \n self.data['Phone Number Combined']))\n return number if len(number) == 10 else 0", "def extractVal(self, regVal):\n reg_shift = regVal/2**self.minBit\n\n reg_mod = reg_shift % 2**self.nBits\n\n return reg_mod", "def read_until(string, untilseq=\"\"):\n idx = string.index(untilseq)\n return string[:idx]", "def decode_payload(encoded_payload):\n jwt_secret = app.config['SECRET_KEY']\n payload = jwt.decode(encoded_payload, jwt_secret, 
algorithms='HS256')\n\n return payload", "def dummy_junction12():\n return \"junction:chr1:176-224:+\"" ]
[ "0.6682689", "0.5536616", "0.54848987", "0.5409332", "0.5409332", "0.5249636", "0.52139443", "0.51220363", "0.5092601", "0.5083486", "0.50243723", "0.49601397", "0.4911395", "0.48801553", "0.48710787", "0.48674506", "0.48674506", "0.48620152", "0.4848582", "0.4832647", "0.4799203", "0.4796334", "0.47760275", "0.4772739", "0.47624645", "0.4756798", "0.47210005", "0.4720052", "0.4716607", "0.47060084", "0.47060084", "0.47030976", "0.4699176", "0.46986386", "0.46780518", "0.46645972", "0.4664305", "0.46487468", "0.46460527", "0.46374395", "0.46359494", "0.46328476", "0.46317106", "0.4630431", "0.46250764", "0.4605395", "0.46039087", "0.46021956", "0.458557", "0.45814213", "0.4574777", "0.45681703", "0.45636827", "0.45563918", "0.454196", "0.45309076", "0.45300892", "0.45253652", "0.4524797", "0.4520725", "0.450369", "0.4502123", "0.44889697", "0.44875857", "0.4486548", "0.44802064", "0.44745588", "0.4470301", "0.4470142", "0.4464792", "0.44573212", "0.4452581", "0.44515538", "0.44457334", "0.44405907", "0.4439264", "0.44362122", "0.4425892", "0.44229162", "0.44134387", "0.4406994", "0.44063404", "0.43939936", "0.43897626", "0.4387292", "0.43868926", "0.43864098", "0.43821824", "0.43773082", "0.43742192", "0.43655294", "0.4359915", "0.43599063", "0.4358775", "0.43560937", "0.43499574", "0.4345587", "0.43452218", "0.43449685", "0.43433315" ]
0.61234236
1
Returns a Restriction from an escaped string (ignoring whitespace)
def from_str(cls, encstr: str) -> 'Restriction': encstr = re.sub(r'\s+', '', encstr) ret, remainder = cls.decode(encstr) if len(remainder) != 0: raise ValueError("Restriction had extrs characters at end: {}" .format(remainder)) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _assign_regex(literal, regex):\n if regex:\n return regex.lower().strip()\n else:\n return r'\\b%s\\b'%literal.lower().strip()", "def interpret_requirement(string):\n string_list = split(string, sep=' ')\n \n requirement = Requirement(points, degree, majors, levels, max_non_degree)\n return requirement", "def lisp_string(python_string):\n return '\"%s\"' % python_string.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')", "def _safe(text):\n return text.replace(\"'\", \"''\").replace(\"\\\\\", \"\\\\\\\\\")", "def typeify(s):\n try:\n return literal_eval(s)\n except:\n return s", "def munge_condition_str(s):\n return s.replace(' and ', ' and\\n').replace(' or ', ' or\\n')", "def restricted_string_type(\n name: str,\n regex: Union[str, Pattern],\n docstring: Optional[str] = None,\n) -> type:\n if isinstance(regex, str):\n regex = re.compile(regex)\n expression = \"matching \" + regex.pattern\n\n extra_attrs = {\n \"_regex\": regex,\n \"_expression\": expression,\n \"_type\": str,\n }\n\n def check_value(cls, v):\n if not cls._regex.match(v):\n raise ValueError(f\"{v} does not match regular expression {cls._regex.pattern}\")\n\n return create_type(\n name=name,\n base_type=str,\n check_value=check_value,\n register_key=(expression, str),\n docstring=docstring,\n extra_attrs=extra_attrs,\n )", "def _get_legal(token):\n valid = re.split(r'[^]a-zA-Z0-0![,. {}@#$%^&*-_+=;:<>?/~\\'\\\\`]', token)\n return ''.join(valid).strip()", "def process_restriction(restriction):\n if not restriction:\n return ''\n else:\n res = restriction.lower()\n if res == 'eaff':\n return '-eAFF'\n elif res == 'ey2h':\n return '-eY2H'\n elif res == 'y2h':\n return '-rY2H'\n elif res == 'aff':\n return '-rAFF'\n else:\n raise network_generation.IncorrectRestrictionType(res)", "def _avert_unallowable(raw_string, escape_double_special_characters=False):\n output = []\n for c in raw_string:\n if c in _caret_escapes:\n output.append(_caret_escapes[c])\n elif escape_double_special_characters and c == '\"':\n output.append('^\"')\n else:\n output.append(c)\n return ''.join(output)", "def decode(cls, encstr: str) -> Tuple['Restriction', str]:\n alts = []\n while len(encstr) != 0:\n if encstr.startswith('&'):\n encstr = encstr[1:]\n break\n alt, encstr = Alternative.decode(encstr)\n alts.append(alt)\n return cls(alts), encstr", "def eval_cast(string):\n\n return W.string_eval_expression(string, {}, {}, {})", "def string_with_double_quote_pattern_validate_regular_expression(cls, value):\n if value is None:\n return value\n\n if not re.match(r\"this is \\\"something\\\"\", value):\n raise ValueError(r\"must validate the regular expression /this is \\\"something\\\"/\")\n return value", "def quote(s):\n return unescape(quoteattr(s))", "def AsRegEx(self):\n parts = _REGEX_SPLIT_PATTERN.split(self._value)\n result = u\"\".join(self._ReplaceRegExPart(p) for p in parts)\n\n return rdf_standard.RegularExpression(u\"(?i)\\\\A%s\\\\Z\" % result)", "def from_string(self, regex_str: str):\n return RegexReader(regex_str)", "def normalise_string(string):\n # Disallow spaces\n if ' ' in string:\n raise PydmrsValueError('Predicates must not contain spaces')\n # Strip surrounding quotes and disallow other quotes\n if string[0] == '\"' and string[-1] == '\"':\n string = string[1:-1]\n if string[0] == \"'\":\n warn('Predicates with opening single-quote have been deprecated', PydmrsDeprecationWarning)\n string = string[1:]\n if '\"' in string:\n raise PydmrsValueError('Predicates must not contain quotes')\n # Force lower case\n if not 
string.islower():\n warn('Predicates must be lower-case', PydmrsWarning)\n string = string.lower()\n # Strip trailing '_rel'\n if string[-4:] == '_rel':\n string = string[:-4]\n \n return string", "def decode(cls, encstr: str) -> Tuple['Alternative', str]:\n cond = None\n end_off = 0\n\n # Swallow field up to conditiona\n while end_off < len(encstr):\n if encstr[end_off] in string.punctuation:\n cond = encstr[end_off]\n break\n end_off += 1\n if cond is None:\n raise ValueError('{} does not contain any operator'\n .format(encstr))\n field = encstr[:end_off]\n end_off += 1\n\n value = ''\n while end_off < len(encstr):\n if encstr[end_off] == '|':\n # We swallow this\n end_off += 1\n break\n if encstr[end_off] == '&':\n break\n if encstr[end_off] == '\\\\':\n end_off += 1\n value += encstr[end_off]\n end_off += 1\n\n return cls(field, cond, value), encstr[end_off:]", "def _MakeRE(regex_str):\n return re.compile(regex_str.format(**SHORTHAND))", "def reg_name(nstr:str) -> object :\r\n\r\n elements=nstr.split(\" \")\r\n combs=word_combination(elements)\r\n lregex=[]\r\n for comb in combs :\r\n if len(comb) > 1 :\r\n lregex.append(\"(?i:{})\".format('[\\.\\- _,;:]?'.join(comb))) #Here to change character seperation between the words\r\n elif len(comb) == 1 :\r\n lregex.append(\"(?i:{})\".format(comb))\r\n else :\r\n pass\r\n \r\n return re.compile('({})'.format(\"|\".join(lregex)))", "def safe_str(self, string):\n return self.db.escape_string(string)", "def from_string(string, _or=''):\n if _or:\n and_or = 'or'\n else:\n and_or = ''\n return Input(string, and_or=and_or)", "def _eval(s):\n l = []\n safechars = ('/', '+', '-', '*', '.', ')', '(')\n for c in s:\n if c.isdigit() or c in safechars:\n l.append(c)\n return eval(''.join(l))", "def test_parse_quotes_doublequote(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a (\\\") character\")):\n api.parse_quote(\" This is a quote\\\". 
| Author | Publication | tag1, tag2 , tag3 \",\n simple_format=False)", "def cry(s : str) -> CryptolTerm:\n return CryptolTerm(s)", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def parse_string_2(string):\n string = re.sub(r\"\\'\", \"\", string)\n return string.strip().lower()", "def validate_safe_string(value):\n # The following strings are explicitly allowed, despite having otherwise-illegal chars.\n legal_strings_with_special_chars = frozenset({'@rid', '@class', '@this', '%'})\n\n if not isinstance(value, six.string_types):\n raise TypeError(u'Expected string value, got: {} {}'.format(\n type(value).__name__, value))\n\n if not value:\n raise GraphQLCompilationError(u'Empty strings are not allowed!')\n\n if value[0] in string.digits:\n raise GraphQLCompilationError(u'String values cannot start with a digit: {}'.format(value))\n\n if not set(value).issubset(VARIABLE_ALLOWED_CHARS) and \\\n value not in legal_strings_with_special_chars:\n raise GraphQLCompilationError(u'Encountered illegal characters in string: {}'.format(value))", "def sanitise(string: str) -> str:\n return \"_\".join(re.findall(re.compile(\"[^ @&()/]+\"), string))", "def test_bug_652575():\n assert _do_test_raw(\"var x = 'capability.policy.';\").failed()", "def parse(s):\n return expr.parseString(s, parseAll=True)", "def parse_mask(string):\n return string.split(' = ')[1]", "def _createAutoLinkRelaxWordEntryRE(word):\r\n # Split into parts of contiguous alphanumeric characters\r\n parts = AutoLinkRelaxSplitRE.split(word)\r\n # Filter empty parts\r\n parts = [p for p in parts if p != u\"\"]\r\n\r\n # Instead of original non-alphanum characters allow arbitrary\r\n # non-alphanum characters\r\n pat = ur\"\\b\" + (AutoLinkRelaxJoinPAT.join(parts)) + ur\"\\b\"\r\n regex = re.compile(pat, AutoLinkRelaxJoinFlags)\r\n\r\n return regex", "def _requires_quotes(self, value):\n lc_value = value.lower()\n return (lc_value in self.reserved_words\n or self.illegal_initial_characters.match(value[0])\n or not self.legal_characters.match(unicode(value))\n or (lc_value != value))", "def address_regex(self) -> Any:", "def test_regex_doublequotehandling(self):\n with pytest.raises(yaml.scanner.ScannerError) as excinfo:\n DwcaValidator(yaml.load(self.yaml_regexitdouble, Loader=yaml.FullLoader), error_handler=WhipErrorHandler)\n assert \"found unknown escape character 'd'\" in str(excinfo.value)", "def ps_filter(val):\n if isinstance(val, Undefined):\n return UNDEFINED_LABEL\n escaped = []\n for char in str(val):\n if char in \"`$#'\\\"\":\n char = \"`\" + char\n elif char == '\\0':\n char = \"`0\"\n elif char == '\\a':\n char = \"`a\"\n elif char == '\\b':\n char = \"`b\"\n elif char == '\\f':\n char = \"`f\"\n elif char == '\\n':\n char = \"`n\"\n elif char == '\\r':\n char = \"`r\"\n elif char == '\\t':\n char = \"`t\"\n elif char == '\\v':\n char = \"`v\"\n escaped.append(char)\n return ''.join(escaped)", "def test_parse_simple_quote_with_double_quote(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a (\\\") character\")):\n api.parse_quote(\" We accept the love we think we \\\" deserve. 
- Stephen Chbosky\",\n simple_format=True)", "def literal(cls, log_pattern_string: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"literal\", [log_pattern_string])", "def condition(self,string):\n \n retval = []\n s = string\n if(s == None):\n return(retval)\n\n # first double quoted strings\n while(len(s)):\n i1 = s.find('\"')\n i2 = s.find('\"',i1+1)\n if(i1 < 0 or i2 < 0):\n retval = retval + s.split()\n return(retval)\n if(i1>0):retval = retval + s[:i1].split()\n retval = retval + [s[i1+1:i2]]\n s = s[i2+1:]\n\n\n return(retval)", "def valid_sql_in_clause_str(input_str):\n\n if not input_str:\n return False\n\n if re.search(r\"^(\\s)*'(.+)'(\\s)*((\\s)*(,)(\\s)*('(.+)'))*$\", input_str):\n return True\n \n return False", "def test_regex_noquotehandling(self):\n\n with pytest.raises(cerberus.schema.SchemaError) as excinfo:\n DwcaValidator(yaml.load(self.yaml_regexit, Loader=yaml.FullLoader), error_handler=WhipErrorHandler)\n\n assert \"{'quotes': [{'regex': ['must be of string type']}]}\" in \\\n str(excinfo.value)", "def test_load_special_chars_2(query_factory):\n text = \"what's on at {{8 p.m.|sys_time}|range}?\"\n processed_query = markup.load_query(text, query_factory)\n entities = processed_query.entities\n\n assert len(entities) == 1\n\n entity = entities[0]\n assert entity.text == \"8 p.m.\"\n assert entity.normalized_text == \"8 p m\"\n assert entity.span == Span(13, 18)\n assert entity.entity.type == \"range\"\n\n nested = entity.entity.value[\"children\"][0]\n assert nested.text == \"8 p.m.\"\n assert nested.span == Span(0, 5)\n assert nested.entity.type == \"sys_time\"\n assert nested.entity.value[\"value\"]", "def getPythonObjectStrInQuotes( inObj ):\n classStr= str(inObj)\n reMatch= re.match( \".*?'(.*?)'.*\", classStr )\n return reMatch.groups()[0]", "def parse(s):\n return s", "def reparam(string_, dictionary):\n dictionary = dictionary.copy() # eval mucks with it\n # disable builtins to avoid risk for remote code exection.\n dictionary['__builtins__'] = object()\n vals = []\n result = []\n for live, chunk in _interpolate(string_):\n if live:\n v = eval(chunk, dictionary)\n result.append(sqlquote(v))\n else: \n result.append(chunk)\n return SQLQuery.join(result, '')", "def text_of_quotation(exp):\n return cadr(exp)", "def addslashes(val):\n return re.escape(val)", "def _restricted_search_mentions(val: str):\n try:\n val = str(val)\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} could not be parsed to a string\")\n\n if not val.startswith('@'):\n return '@' + val\n return val", "def from_normalised_string(string):\n if string[0] == '_':\n raise PydmrsValueError(\"GPred strings must not begin with an underscore\")\n else:\n return GPred(string)", "def ldap_filter(val):\n if isinstance(val, Undefined):\n return UNDEFINED_LABEL\n escaped = []\n for char in str(val):\n if char < '0' or char > 'z' or char in \"\\\\*()\":\n char = \"\\\\%02x\" % ord(char)\n escaped.append(char)\n return ''.join(escaped)", "def quoteString(s):\n if s is None:\n return None\n quoted = str(s).replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\")\n return \"'{}'\".format(quoted)", "def unstringify(cls, s: str, unescape_pipe: bool = True)->str:\n if s.startswith(cls.LANGUAGE_QUALIFIED_STRING_SIGIL):\n language: str\n s, language = s.rsplit(\"@\", 1)\n if unescape_pipe:\n s = s.replace('\\\\|', '|')\n return ast.literal_eval(s)", "def polishString(s): \n return re.sub(\"[/\\\\\\?\\|<>:\\\"\\*]\",\"_\",s).strip()", "def escape(s, pattern=r'(\\W)'):\n r = 
re.compile(pattern)\n return r.subn(r'\\\\\\1', s)[0]", "def escape_like(string, escape_char=\"\\\\\"):\n return (\n string.replace(escape_char, escape_char * 2)\n .replace(\"%\", escape_char + \"%\")\n .replace(\"_\", escape_char + \"_\")\n )", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def LegalIn(f):\n return search(field='legalities', method=HASKEY, value=f.lower())", "def test_if_filter_statement():\n r = convert_code(\n \"{if awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def escape(x):\n if '\\'' not in x:\n return '\\'' + x + '\\''\n s = '\"'\n for c in x:\n if c in '\\\\$\"`':\n s = s + '\\\\'\n s = s + c\n s = s + '\"'\n return s", "def un_quote(param):\n return param.replace(\"\\'\", \"\").replace(\"\\\"\", \"\").replace(\"\\\\\", \"\")", "def evaluate_string(string):\n return ast.literal_eval(string)", "def make_filter_specification(cls, filter_string):\n try:\n return parse_filter(filter_string)\n except ParseException as err:\n raise ValueError('Expression parameters have errors. %s' % err)", "def main(self, regex_string):\n sql_sen = regex_string[0][0]\n reg = \"\\$\\w+\"\n if re.search(reg, sql_sen, re.I):\n\n p = re.compile(reg)\n match = p.findall(sql_sen)\n return match\n return None", "def escape_string(text):\n return escape(text)", "def ScanRE(self, exp):\n # Make sure the expression is not empty\n assert type(exp) is str \n assert exp\n \n self.NotedRE = list()\n\n i = 0\n while i < len(exp):\n if exp[i] == ' ':\n i += 1\n elif exp[i] == '\\\\':\n ch = exp[i:i + 2]\n i += 2\n else:\n ch = exp[i]\n i += 1\n \n self.NotedRE.append(rule.CheckCharType(ch))", "def autoconvert(s):\n try:\n return eval(s)\n except:\n return s", "def find_restricted_words(content):\n restricted_words_obj = db.engine.execute(\"select * from restricted_word;\")\n restricted_words_dict = []\n for row in restricted_words_obj:\n if ' ' + row[1].upper().strip() + ' ' in content:\n restricted_words_dict.append({'id': row[0], 'phrase': row[1].upper()})\n\n return restricted_words_dict", "def filter_invalid_str(s):\n return s.parent.name not in ['style', 'script'] and not isinstance(s, Comment)", "def parse_string_3(string):\n string = re.sub(r\"\\\"\", \"\", string)\n return string.strip().lower()", "def str_to_container(str_or_obj: str or object):\n try:\n return literal_eval(str_or_obj)\n except (ValueError, SyntaxError):\n return str_or_obj", "def clean_str(string):\r\n string = re.sub(r\"\\\\\", \"\", string)\r\n string = re.sub(r\"\\'\", \"\", string)\r\n string = re.sub(r\"\\\"\", \"\", string)\r\n return string.strip().lower()", "def clean_str(string):\r\n string = re.sub(r\"\\\\\", \"\", string)\r\n string = re.sub(r\"\\'\", \"\", string)\r\n string = re.sub(r\"\\\"\", \"\", string)\r\n return string.strip().lower()", "def clean_str(string):\r\n string = re.sub(r\"\\\\\", \"\", string)\r\n string = re.sub(r\"\\'\", \"\", string)\r\n string = re.sub(r\"\\\"\", \"\", string)\r\n return string.strip().lower()", "def dequote(self, in_str):\n in_str = in_str.replace(\"'\", \"\")\n in_str = in_str.replace('\"', \"\")\n return in_str", "def RegexSafe(regex):\n regex = regex.lower()\n regex = regex.replace('.', '\\.')\n regex = regex.replace('-', '\\-')\n # This should never happen but best to be careful.\n regex = regex.replace('||', '|')\n return regex", "def masked_by_quotechar(S, quotechar, escapechar, test_char):\n if test_char == 
\"\":\n return False\n escape_next = False\n in_quotes = False\n i = 0\n while i < len(S):\n s = S[i]\n if s == quotechar:\n if escape_next:\n i += 1\n continue\n if not in_quotes:\n in_quotes = True\n else:\n if i + 1 < len(S) and S[i + 1] == quotechar:\n i += 1\n else:\n in_quotes = False\n elif s == test_char and not in_quotes:\n return False\n elif s == escapechar:\n escape_next = True\n i += 1\n return True", "def get_field(fieldname):\n m = re.search(\"(^|\\\\n)%s\\\\s(.*?)\\n\" % fieldname, s, re.I)\n if not m:\n return None\n else:\n return Unquote(m.group(2))", "def parse_pattern(s: str) -> str:\n # Escape regex metacharacters\n for c in [\"\\\\\", \".\", \"(\", \")\", \"[\", \"]\", \"^\", \"$\", \"*\", \"+\", \"?\", \"|\"]:\n s = s.replace(c, \"\\\\\" + c)\n\n s = re.sub(\"~+\", \".*\", s)\n s = \"^\" + s + \"$\"\n return s", "def from_str ( cls, s, strict=False ):\n rwx_user = RWX.from_str ( s[0:3], strict=strict )\n rwx_group = RWX.from_str ( s[3:6], strict=strict )\n rwx_others = RWX.from_str ( s[6:9], strict=strict )\n return cls ( rwx_user, rwx_group, rwx_others )", "def string_constant(self):\n col = self.pos\n try:\n self.match('\\\"')\n\n chars = []\n while not self.eos() and self.read() != '\\\"':\n\n # An escaped \" should not close the string\n if self.read(2) == \"\\\\\\\"\":\n chars.append(self.read(2))\n self.pos += 2\n else:\n chars.append(self.read())\n self.pos += 1\n\n self.match('\\\"')\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid string constant.\")\n\n constant = StringConstant(self.line, col, self.prev_white, \"\".join(chars))\n return constant", "def raw_string(seq):\n\n def f(s):\n \"\"\" Filter latex \"\"\"\n r = s.replace('\\\\', '\\\\\\\\').replace('_', '\\_').replace('^', '\\^')\n return r\n\n return [ f(k) for k in seq ]", "def shellquote(arg):\n if re.match('^[-_.:/=a-zA-Z0-9]*$', arg):\n return arg\n else:\n return \"'%s'\" % arg.replace(\"'\", r\"'\\''\")", "def is_valid_pred_string(predstr):\n predstr = predstr.strip('\"').lstrip(\"'\")\n # this is a stricter regex than in Pred, but doesn't check POS\n return re.match(\n r'_([^ _\\\\]|\\\\.)+_[a-z](_([^ _\\\\]|\\\\.)+)?(_rel)?$'\n r'|[^_]([^ \\\\]|\\\\.)+(_rel)?$',\n predstr\n ) is not None", "def test_escape_argument_path_with_space():\n encoded = win_functions.escape_argument(\"C:\\\\Some Path\\\\With Spaces\")\n assert encoded == '^\"C:\\\\Some Path\\\\With Spaces^\"'", "def qstring(self, s):\n\n if '\"' in s or ' ' in s or '\\\\' in s:\n return '\"' + s.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"') + '\"'\n else:\n return s", "def unquote(s, *a, **kw):\n return quote(s, *a, **kw)", "def from_str ( cls, s, strict=False ):\n readable, writable, executable = False, False, False\n\n if strict:\n _s = s.lower()\n readable = _s[0] == 'r'\n writable = _s[1] == 'w'\n executable = _s[2] == 'x'\n\n elif s:\n for char in s.lower():\n if char == 'r':\n readable = True\n elif char == 'w':\n writable = True\n elif char == 'x':\n executable = True\n # -- end for\n # -- end if\n\n return cls ( readable, writable, executable )", "def dot_escape(s):\n s = re.sub(r'([^a-zA-Z0-9\" ])', r\"\\\\\\1\", s)\n return s", "def normalize(s):\n s = str(s.strip())\n patt = re.compile(r'^\\?\\w+$')\n if patt.match(s):\n name = s[1:]\n return Variable(name)\n return s", "def clean_str(string):\n string = re.sub(r\"\\\\\", \"\", string)\n string = re.sub(r\"\\'\", \"\", string)\n string = re.sub(r\"\\\"\", \"\", string)\n return string.strip().lower()", "def clean_str(string):\n string = 
re.sub(r\"\\\\\", \"\", string)\n string = re.sub(r\"\\'\", \"\", string)\n string = re.sub(r\"\\\"\", \"\", string)\n return string.strip().lower()", "def clean_str(string):\n string = re.sub(r\"\\\\\", \"\", string)\n string = re.sub(r\"\\'\", \"\", string)\n string = re.sub(r\"\\\"\", \"\", string)\n return string.strip().lower()", "def quot(string):\r\n return string.replace('\"', \"'\")", "def processword(word):\n word = word.lower()\n word = word.strip('()?,!`.-:\\\"\\n \\'')\n return word", "def escape_single_quote(unescaped):\n\t# requirements = re\n\treturn re.sub(r'(\\'|\\\\)', r'\\\\\\1', unescaped)", "def is_special_text(text):\n return len(text) > 5 and \\\n (text[0:5] == '\"VAR:' or text[0:5] == '\"TER:') and \\\n text[-1] == '\"'", "def __convert_zone_to_regex(zone_queried):\n regex_string = \"^(.*\\\\.)*\" + re.escape(zone_queried) + \"$\"\n return re.compile(regex_string)", "def parse_cond(cond):\n if isinstance(cond, ColumnProxy):\n return cond.conds\n if not isinstance(cond, str_types):\n return cond\n m = COND_RE.match(cond)\n if m is None:\n msg = 'Could not parse condition from {0}'\n raise ValueError(msg.format(cond))\n c = tuple(map(stripper, m.groups()))\n return c", "def _eval(self, estr):\n from vgdl.ontology import *\n return eval(estr)" ]
[ "0.5168612", "0.5026671", "0.49126112", "0.48980483", "0.4877988", "0.485538", "0.48332566", "0.48306048", "0.4796996", "0.47644973", "0.4762547", "0.47521722", "0.4743869", "0.47309223", "0.47302634", "0.4718924", "0.47140232", "0.47117367", "0.4668038", "0.46525672", "0.46359733", "0.4625946", "0.4619906", "0.46132216", "0.46042398", "0.4586213", "0.4583964", "0.4576204", "0.45618618", "0.45545098", "0.4542231", "0.454129", "0.4535988", "0.45358363", "0.45270893", "0.451781", "0.45039132", "0.4491602", "0.4487074", "0.4483594", "0.4481818", "0.44761315", "0.44732538", "0.44663152", "0.4460541", "0.44509706", "0.44504276", "0.4447396", "0.44453204", "0.4444368", "0.44204083", "0.44190028", "0.44188502", "0.44146842", "0.44139552", "0.44044104", "0.43957454", "0.43943074", "0.43872795", "0.43862432", "0.43857512", "0.43847415", "0.4382327", "0.4379704", "0.4376027", "0.4375302", "0.4371558", "0.43612963", "0.4355295", "0.4351247", "0.43479833", "0.4347054", "0.4347054", "0.4347054", "0.43451676", "0.4344951", "0.4342347", "0.433454", "0.43308908", "0.4329544", "0.43242198", "0.43207964", "0.4320244", "0.43200845", "0.43198392", "0.43196782", "0.43156636", "0.43145397", "0.43131948", "0.4312307", "0.43114242", "0.43114242", "0.43114242", "0.43058494", "0.43033704", "0.43011808", "0.428207", "0.42814353", "0.42793244", "0.42761296" ]
0.6378927
0
Helper to produce an id 'restriction'
def unique_id(cls, unique_id: Union[int, str], version: Optional[Union[int, str]] = None) -> 'Restriction': idstr = str(unique_id) if '-' in idstr: raise ValueError('Hyphen not allowed in unique_id {}'.format(idstr)) if version: idstr += '-{}'.format(version) # We use the empty field for this, since it's always present. return cls([Alternative('', '=', idstr)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def friendly_id(self):\n id = f\"{self.annotator_id}_{self.document_title.split('_')[0]}\"\n\n try: # try making an sentence identifier if there is an in_sentence attrib\n sen_id = \",\".join(str(se.element_id + 1) for se in self.in_sentence)\n id += f\"_s{sen_id}\"\n except Exception as e:\n print(e)\n pass\n\n if isinstance(self, Event):\n id += f\"_{self.event_fulltype}\"\n elif isinstance(self, Participant) or isinstance(self, Filler):\n id += f\"_{self.role}\"\n\n text_ellips = (\n (self.text[:15] + \"..\" + self.text[-15:])\n if len(self.text) > 32\n else self.text\n )\n id += f\"-{text_ellips}\"\n return id", "def _get_anno_id(self, start):\n if self.orientation > 0: # positive strand\n return '%s:%d' % (self.id, start % 3)\n else: # negative strand\n return '%s:-%d' % (self.id, (-start) % 3)", "def getID():", "def get_id(self):\n if self.mlat:\n return f'm{-self.mlat}_{self.mlng}'\n else:\n return f'{-self.clat}_{self.clng}'", "def unique_id(self):\n return f\"octopus_energy_intelligent_charge_limit\"", "def reqid(self) -> str:", "def reqid(self) -> str:", "def _internal2document_id(value):\n return 2*value + 1", "def create_id(elements: Iterable) -> str:\r\n i = 1\r\n while str(i) in elements:\r\n i += 1\r\n return str(i)", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def _document2internal_id(value):\n return (value - 1)//2", "def fullId(self):\n return self.sampleid+' '+self.condition+' '+self.activeChildWellIdStr()", "def computed_id(o):\n\n if o.id is not None and o.id.startswith(namespace + \":\"):\n return o.id\n\n return \"{i.namespace}:{i.accession}\".format(i=computed_identifier(o))", "def generateMatchClause(code, type, i):\n match = WOQLQuery().woql_and(\n WOQLQuery().idgen(\"doc:\" + type, [code], \"v:ID_\"+str(i)),\n WOQLQuery().cast(code, \"xsd:string\", \"v:Label_\"+ str(i))\n #WOQLQuery().idgen(\"doc:\" + type, [{\"@value\": code, \"@type\": \"xsd:string\"}], \"v:ID_\"+str(i)),\n #WOQLQuery().cast({\"@value\": code, \"@type\": \"xsd:string\"}, \"xsd:string\", \"v:Label_\"+ str(i))\n )\n return match", "def format_id(i):\n return f'{i:>{ID_SIZE}}'", "def _create_id(length=40):\n\n numbers = map(str, range(10))\n letters = string.ascii_lowercase\n options = [*letters[:letters.index('f') + 1], *numbers]\n\n return ''.join(random.choice(options) for _ in range(length))", "def id_func(param):\n if isinstance(param, dict) and \":name:\" in param:\n return param[\":name:\"]\n\n retval = str(param)\n if len(retval) > 25:\n retval = retval[:20] + \"...\" + retval[-2:]\n return retval", "def id_func(param):\n if isinstance(param, dict) and \":name:\" in param:\n return param[\":name:\"]\n\n retval = str(param)\n if len(retval) > 25:\n retval = retval[:20] + \"...\" + retval[-2:]\n return retval", "def id_format(param, **kwa):\n try:\n ns, ti = param.split('-')\n if ns and ti:\n return param\n else:\n raise ValueError\n except ValueError:\n raise ValueError('Supplied id is invalid.')", "def id_tag(self) -> str:\n return f\"pmid-{self.id}\" if self.id else \"\"", "def get_identifier(self):", "def get_actual_id(translated):", "def roleDocumentId(self, id: str) -> str:", "def id(cls) -> 'tuple[Literal[\"InARP\"]]': # type: ignore[override]\n return ('InARP',)", "def _id(self):\n pass", "def process_restriction(restriction):\n if not restriction:\n return ''\n else:\n res = restriction.lower()\n if res == 'eaff':\n return '-eAFF'\n elif res == 'ey2h':\n return '-eY2H'\n elif res == 'y2h':\n return '-rY2H'\n elif res == 'aff':\n return 
'-rAFF'\n else:\n raise network_generation.IncorrectRestrictionType(res)", "def case_id():\n return 3000", "def identifier(self):", "def id(self) -> str:\n pass", "def generate_identifier(sender, instance, **kwargs):\n identifier = Concept.create_identifier(instance.query)\n qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)\n if instance.pk:\n qs = qs.exclude(pk=instance.pk)\n if qs.count() > 0:\n raise ValueError(\"Concept identifier conflict\")\n instance.identifier = identifier", "def id_for_label(self):\n return \"\"", "def _next_rId(self):\n tmpl = 'rId%d'\n next_rId_num = 1\n for relationship in self._values:\n if relationship._num > next_rId_num:\n return tmpl % next_rId_num\n next_rId_num += 1\n return tmpl % next_rId_num", "def new_id(self):\n self._tmp_id_counter += 1\n return '%.' + str(self._tmp_id_counter)", "def _create_finding_id(control_id, resource_name, length=20):\n input = control_id + resource_name\n hex = hashlib.sha256(input.encode('UTF-8')).hexdigest()\n result = int(hex, 16) % (10 ** length)\n return str(result)", "def part_id(self):\n ...", "def make_html5_id(orig: str) -> str:\n clean_id = PAT_INVALID_ID_CHARACTERS.sub(\"-\", orig)\n if not clean_id:\n clean_id = \"unnamed\"\n return clean_id", "def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id", "def eidr_identifier(title):\n pass", "def id(self):\n return '%s-%x' % (self.qname, id(self))", "def _id(self, document):\n pass", "def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n return \"collisions\"", "def fill_id(id):\n if len(str(id)) < 7:\n length = len(str(id))\n id = \"0\"*(7 - length) + str(id)\n return str(id)", "def get_id(self, refobj):\n return cmds.getAttr(\"%s.identifier\" % refobj)", "def req_id(self) -> str:\n pass", "def id(self):\n return \"{model:s}--{serial:08x}\".format(model=self.model.replace('-',''), serial=self.serial_number).lower()", "def resourceid(self):", "def unique_id() -> str:", "def html_id(self):\r\n id_fields = [self.DEPRECATED_TAG, self.org, self.course, self.category, self.name, self.revision]\r\n id_string = u\"-\".join([v for v in id_fields if v is not None])\r\n return Location.clean_for_html(id_string)", "def get_alternative_id(self):\n if self.id is not None:\n # custom id from `@parametrize(ids=<callable_or_list>)`\n return self.id\n else:\n return mini_idvalset(self.argnames, self.argval, idx=self.alternative_index)", "def _expanded_id(name: str, sep: str = '_') -> str:\n return sep.join([el.lower()\n for el in re.split(r'([A-Z]+[^A-Z]*)', name)\n if el])", "def generate_room_id():\r\n id_length = 6\r\n while True:\r\n id_tmp = ''.join(random.SystemRandom().choice(\r\n string.ascii_uppercase) for _ in range(id_length))\r\n conflict = id_tmp in rooms\r\n if not conflict:\r\n return id_tmp", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def build_parameter_set_identifier(\n parameter_set: DWDObservationParameterSet,\n resolution: DWDObservationResolution,\n period: DWDObservationPeriod,\n station_id: str,\n date_range_string: Optional[str] = None,\n) -> str:\n identifier = (\n f\"{parameter_set.value}/{resolution.value}/\" f\"{period.value}/{station_id}\"\n )\n\n if date_range_string:\n identifier = f\"{identifier}/{date_range_string}\"\n\n return identifier", "def get_id(self, desired: int = -1) -> int:\n\n if 
desired == -1:\n return super().get_id()\n else:\n self._used.add(desired)\n return desired", "def _build_name(name_id):\n return \"xp_%08d\" % name_id", "def make_id_str(self):\n options = [n for n, v in self.option_settings.items() if v]\n return \":\".join([self.name] + options)", "def id_(x: Any) -> Any:\n return x", "def build_id():\n return \"test123\"", "def autoid(self) -> str:", "def autoid(self) -> str:", "def grr_id(line: Text) -> Text:\n del line # Unused.\n return magics_impl.grr_id_impl()", "def setFreeId(self,element):\n if element.__class__==Subtoken:\n prefix = 'st'\n inlist = self.subtokens.values()\n elif element.__class__==DepToken:\n prefix = 'dt'\n inlist = self.deptokens.values()\n elif element.__class__==RelToken:\n prefix = 'rt'\n inlist = self.reltokens.values()\n elif element.__class__==DepEntity:\n prefix = 'db'\n inlist = self.depentities.values()\n elif element.__class__==RelEntity:\n prefix = 'rb'\n inlist = self.relentities.values()\n elif element.__class__==DepNode:\n prefix = 'dn'\n inlist = self.dependencies.nodes()\n elif element.__class__==RelNode:\n prefix = 'rn'\n inlist = self.interactions.nodes()\n elif element.__class__==DepEdge:\n prefix = 'de'\n inlist = [x[2] for x in self.dependencies.edges()]\n elif element.__class__==RelEdge:\n prefix = 're'\n inlist = [x[2] for x in self.interactions.edges()]\n else:\n inlist = []\n printWarning(self.__class__,\n inspect.stack()[0][3],\n \"%s should not have id\"%(element.__class__))\n used = map(lambda x:x.getRunningId(),inlist)\n element.id = '.'.join([prefix, self.id, FreeIdIter(used).get()])\n return(True)", "def maxid() -> int:\n pass", "def getId(self):\n return '%s%08X' % (self.id,self.index)", "def concept_id_fields(item):\n return scom.concept_id_fields(item)", "def prepId(self, id, subchar='_'):\n return globalPrepId(id, subchar)", "def resourceDocumentId(self, resource: Resource) -> str:", "def getId(self): #$NON-NLS-1$\r", "def new_relation_id(self):\n relation_id = None\n js_maxint = 2**53-1\n while not relation_id: # We don't like 0 either\n relation_id = randrange(-js_maxint, js_maxint)\n if relation_id in self.relation_to_rids: # pragma: no cover\n relation_id = None\n return relation_id", "def id_for_label(self, id_):\n return id_", "def find(id: Union[int, str]) -> 'Wall':\n pass", "def create_simple_restriction(datasets, datset_name, restriction):\n if datset_name in datasets and restriction is not None:\n dataset = datasets[datset_name]\n return get_restriction(restriction, dataset)", "def get_id(type_: Dict[str, str]) -> int:\n return int(type_[f'{type_name}_id'])", "def id(self):\n raise NotImplementedError()", "def _id(self):\n result = ''\n while self.current_char is not None and self.current_char.isalnum() or self.current_char == '_':\n result += self.current_char\n self.advance()\n\n return Token(ID, result)", "def format_id(self, html=False):\n if self.term_type == 'C':\n full_id = 'KEGG:' + self.org_prefix + self.term_id\n else:\n full_id = 'KEGG:' + self.term_type\n\n if html:\n term_id = self.id_anchor_fmt % (self.url(), full_id)\n else:\n term_id = full_id\n return term_id", "def getID(self) -> int:\n ...", "def _add_ID(self, preferred_id):\n self.id = preferred_id\n while self.id in Thing.ID_dict: # unique-ify self.id if necessary\n self.id = self.id + str(random.randint(0, 9))\n Thing.ID_dict[self.id] = self\n return self.id", "def get_id(self, desired: int = -1) -> int:\n if desired > 0 and desired not in self._used:\n # The desired ID is available!\n 
self._used.add(desired)\n return desired\n\n # Check every ID in order to find a valid one.\n poss_id = self.search_pos\n while True:\n if poss_id not in self:\n self._used.add(poss_id)\n self.search_pos = poss_id + 1\n return poss_id\n poss_id += 1", "def _build_id():\n # 1: Timestamp\n current_id = FindanceIdField.date_to_int(datetime.utcnow().replace(tzinfo=timezone.utc)) << 23\n\n # 2: Shard ID (For now, always one)\n current_id |= SHARD_ID << 10\n\n # 3: Auto-incr with the last 10 bits\n current_id |= next(BASIC_TICK) % 1024\n\n return current_id", "def _format_id(self, index):\n if index in self.VALID_ID_RANGE:\n return '!ID {}\\n'.format(str(index).zfill(self.MAX_DIGITS_QTD))\n raise IndexError(\"IDFile._format_id: {} is out of range\".format(index))", "def name_id(self, fn:str='Jane',ln:str='Dear',sec:int=5):\n code = '[0-9]{4:%s}'% int(sec)\n return f\"{fn[0].capitalize()}{ln[0].capitalize()}{StringGenerator(str(code)).render(unique=True)}\"", "def generate_id(self):\n unique_id = \"\"\n\n while len(unique_id) < self.id_length:\n ascii_number = self.get_random_bits()\n\n if self.is_approved_ascii(ascii_number):\n random_char = chr(ascii_number)\n\n if not self.is_excluded_char(random_char):\n unique_id += chr(ascii_number)\n\n return unique_id", "def _CreateRecordId(self):\n self._record_count += 1\n return '%s_%s' % (self._unique_id, self._record_count)", "def _clean_id(self, dirty_id):\n return self.wsid_regex.sub(\"\", dirty_id.replace(\" \", \"_\"))", "def _identifier_suffix(self):\r\n return ''", "def showId(self):\n extent = self.getExtent()\n id = self.parent_id\n levels = self.getLevels()\n prefix = settings.NESTED_TAXONOMY_PREFIX\n \n # name = prefix,id,levels,extent\n \n name = '%s:%s:%s:%s' %(prefix,id,levels,extent)\n return name", "def _unique_id():\n id = \"\"\n for i in xrange(0,8):\n id += choice(ascii_letters)\n return id", "def getUniStr(self):\n return('-'.join([\"%s-%s\"%(x.id,(x.id in self.special))\n for x in self.getNested()]))" ]
[ "0.6390797", "0.59261435", "0.5828868", "0.58210576", "0.57121724", "0.56987214", "0.56987214", "0.5654812", "0.56331956", "0.5582573", "0.5568012", "0.55624306", "0.55531055", "0.5549231", "0.55474275", "0.5540417", "0.5539157", "0.5539157", "0.5516462", "0.55037725", "0.5498906", "0.5495896", "0.54892373", "0.54886675", "0.54614043", "0.5454641", "0.54463685", "0.5439757", "0.54322255", "0.5423986", "0.54143834", "0.5412241", "0.540288", "0.53959495", "0.5393793", "0.5387611", "0.5354571", "0.5317968", "0.5309148", "0.53009313", "0.52946943", "0.5287216", "0.5286117", "0.52858627", "0.5283324", "0.5266407", "0.5263398", "0.5260655", "0.52574795", "0.5253436", "0.52371776", "0.522561", "0.522561", "0.522561", "0.522561", "0.522561", "0.522561", "0.522561", "0.522561", "0.522561", "0.522561", "0.522561", "0.522561", "0.5222528", "0.52193135", "0.5207186", "0.5203547", "0.5200177", "0.51975197", "0.51937413", "0.51937413", "0.51908195", "0.5189307", "0.51880664", "0.5186796", "0.5186701", "0.51856965", "0.51818466", "0.51761603", "0.51754963", "0.51754594", "0.51735944", "0.5169018", "0.51585066", "0.514934", "0.5146808", "0.5142451", "0.51405936", "0.51367205", "0.51341", "0.5127477", "0.5116933", "0.51025784", "0.5101119", "0.51008224", "0.5098679", "0.50970364", "0.5094872", "0.5093438", "0.50918293" ]
0.5554285
12
Tests the restrictions against the values dict given. Normally values are treated as strings, but < and > conditions only work if they're actually integers. Returns (True, '') if everything is good. Otherwise, returns (False, reasonstring)
def are_restrictions_met(self, values: Dict[str, Any]) -> Tuple[bool, str]: for r in self.restrictions: reasons = r.test(values) if reasons is not None: return False, reasons return True, ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self, values: Dict[str, Any]) -> Optional[str]:\n # This is always True\n if self.cond == '#':\n return None\n\n def why(cond, field, explanation) -> Optional[str]:\n if cond:\n return None\n return '{}: {}'.format(field, explanation)\n\n # If it's missing, it's only True if it's a missing test.\n if self.field not in values:\n # Default to ignoring id field as long as no version.\n if self.field == '':\n return why('-' not in self.value, 'id', 'unknown version {}'.format(self.value))\n return why(self.cond == '!', self.field, 'is missing')\n\n # If they supply a function, hand it to them.\n if callable(values[self.field]):\n return values[self.field](self)\n\n val = str(values[self.field])\n if self.cond == '!':\n return why(False, self.field, 'is present')\n elif self.cond == '=':\n return why(val == self.value,\n self.field,\n '!= {}'.format(self.value))\n elif self.cond == '/':\n return why(val != self.value,\n self.field,\n '= {}'.format(self.value))\n elif self.cond == '^':\n return why(val.startswith(self.value),\n self.field,\n 'does not start with {}'.format(self.value))\n elif self.cond == '$':\n return why(val.endswith(self.value),\n self.field,\n 'does not end with {}'.format(self.value))\n elif self.cond == '~':\n return why(self.value in val,\n self.field,\n 'does not contain {}'.format(self.value))\n elif self.cond == '<':\n try:\n actual_int = int(val)\n except ValueError:\n return why(False, self.field, \"not an integer field\")\n try:\n restriction_val = int(self.value)\n except ValueError:\n return why(False, self.field, \"not a valid integer\")\n return why(actual_int < restriction_val,\n self.field,\n \">= {}\".format(restriction_val))\n elif self.cond == '>':\n try:\n actual_int = int(val)\n except ValueError:\n return why(False, self.field, \"not an integer field\")\n try:\n restriction_val = int(self.value)\n except ValueError:\n return why(False, self.field, \"not a valid integer\")\n return why(actual_int > restriction_val,\n self.field,\n \"<= {}\".format(restriction_val))\n elif self.cond == '{':\n return why(val < self.value,\n self.field,\n 'is the same or ordered after {}'.format(self.value))\n elif self.cond == '}':\n return why(val > self.value,\n self.field,\n 'is the same or ordered before {}'.format(self.value))\n else:\n # We checked this in init!\n assert False", "def test(self, values: Dict[str, Any]) -> Optional[str]:\n reasons = []\n for alt in self.alternatives:\n reason = alt.test(values)\n if reason is None:\n return None\n reasons.append(reason)\n\n return \" AND \".join(reasons)", "def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode invalid\"\n return rune.are_restrictions_met(values)", "def _check_value(self):\n value = str(self._value_field.toPlainText())\n if value=='': return True\n ACCEPTABLES_CHARS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n '.', ',', ';', ' ', '\\n', '-')\n\n for char in value:\n if not char in ACCEPTABLES_CHARS:\n return False\n if Variable.is_acceptable_arg(value):\n rows, columns = np.matrix(value).shape\n return 1 <= rows <= 4 and 1 <= columns <= 4\n else:\n return False", "def _security_check_parameters(param_dict):\n for key, value in param_dict.iteritems():\n str_value = str(value) # Could easily be an int or a float\n for bad_str in [\";\", \"&&\", \">\", \"<\", \"|\"]:\n if bad_str in 
str_value:\n raise ValueError(\"Rejecting suspicious argument for %s\" % key)", "def checkValue(c, m, y, k):\n MINVAL=0\n MAXVAL=255\n valueOk=True\n for val in c, m, y, k:\n if val >=MINVAL and val <=255:\n pass\n else:\n valueOk=False\n \n return valueOk", "def has_valid_values(self):\n for element, value in self.items():\n if not (0 <= value <= 1):\n return False\n return True", "def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")", "def __allowed_values_inccorrect_string(self):\n strTestName = 'Values of a string (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n\n RxCSObject.paramAddMan('parameter3', 'string')\n RxCSObject.paramType('parameter3', str)\n RxCSObject.paramAllowed('parameter3', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 21\n RxCSObject.parameter3 = 'Allowed string #3'\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n return MasterRune(secret).check_with_reason(b64str, values)", "def sanitize_input(ll):\n p = sum([l[1] for l in ll])\n if not all([l[0] == int(l[0]) for l in ll]):\n if round(p, 5) != 1:\n return \"It's not a valid distribution and furthermore, one or more variable value are not integers\"\n else:\n return \"All the variable values should be integers\"\n if round(p, 5) != 1:\n return \"It's not a valid distribution\"", "def is_valid(self, value) -> 'True | str':\n err_str = super().is_valid()\n if isinstance(err_str, str):\n return err_str\n if value < self.min_val or value > self.max_val:\n return f'The value \"{value}\" must be in range <{self.min_val}, {self.max_val}>.'\n return True", "def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None", "def filter_cond(line_dict):\n if(line_dict[\"if1\"] == ''):\n return False\n cond_match = (\n (int(line_dict[\"if1\"]) > 20 and int(line_dict[\"if1\"]) < 40)\n ) \n return True if cond_match else False", "def check_value(self, value):", "def _check_values(self, values):\n mod = []\n for k, v in values.items():\n if isinstance(v, str) and \"'\" in v:\n mod.append(k)\n if len(mod) == 0:\n return values\n else:\n values = copy.copy(values)\n for k in mod:\n values[k] = values[k].replace(\"'\", \"''\")\n return values", "def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def test_for_criteria(self):\n ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values", "def 
validMisc(self, p_upperRateLim, p_lowerRateLim, p_fixedAVDelay, p_modulationSensitivity):\n checks = 0;\n for val in [p_upperRateLim, p_lowerRateLim, p_fixedAVDelay, p_modulationSensitivity]:\n if val is None:\n checks += 1;\n #check p_lowerRateLim\n if not (p_lowerRateLim is None):\n for valid in frange(30,50,5):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n for valid in frange(51,90,1):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n for valid in frange(95,175,5):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n #check p_upperRateLim\n if not (p_upperRateLim is None):\n for valid in frange(50,175,5):\n if p_upperRateLim == valid: #and p_upperRateLim >= p_lowerRateLim: #need to implement\n checks += 1;\n break\n if not (p_modulationSensitivity is None):\n if p_modulationSensitivity > 0 and p_modulationSensitivity <= 16:\n if p_modulationSensitivity is int:\n checks += 1;\n if not (p_fixedAVDelay is None):\n for valid in frange(70,300,10):\n if valid == p_fixedAVDelay:\n checks += 1\n if checks == 4:\n return True\n return False", "def check_value(self, key: str, value: Any):\n # Check the value with a set of tests\n self._check_missing(key, value)\n self._check_allowed_values(key, value)\n self._check_data_type(key, value)\n self._check_value_range(key, value)", "def validate_strength(cls, value: str) -> (bool, dict):\n if value is None:\n return False, {}\n\n length = cls._validate_length(value)\n digit = cls._validate_digit(value)\n uppercase = cls._validate_uppercase(value)\n lowercase = cls._validate_lowercase(value)\n symbol = cls._validate_symbol(value)\n\n valid = all([length, digit, uppercase, lowercase, symbol])\n error_dict = {\n 'length': length,\n 'digit': digit,\n 'uppercase': uppercase,\n 'lowercase': lowercase,\n 'symbol': symbol,\n }\n\n return valid, error_dict", "def check_validity(self):\n try:\n if self.type == ConstraintTypes.EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.NOT_EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.WITHIN:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], type(self.value[1])), \"Invalid types.\"\n )\n enforce(\n isinstance(self.value[1], type(self.value[0])), \"Invalid types.\"\n )\n elif self.type == ConstraintTypes.IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if 
len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.NOT_IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.DISTANCE:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], Location),\n \"Invalid type, expected Location.\",\n )\n enforce(\n isinstance(self.value[1], float), \"Invalid type, expected Location.\"\n )\n else: # pragma: nocover\n raise ValueError(\"Type not recognized.\")\n except ValueError:\n return False # pragma: nocover\n\n return True", "def _check_value(self,val):\n if self.allow_None and val is None:\n return\n\n if not _is_number(val):\n raise ValueError(\"Parameter '%s' only takes numeric values\"%(self._attrib_name))\n \n self._checkBounds(val)", "def __allowed_values_correct_string(self):\n strTestName = 'Values of a string (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'string')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramAllowed('parameter1', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 'Allowed string #2'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def _check_helper(self, value, raise_exceptions=True) -> bool:\n if not isinstance(value, self.value_type):\n if raise_exceptions:\n raise InvalidParameterException(\n '%s: invalid type given: %s (required %s)' % (\n self.name, type(value),\n ', '.join([str(x) for x in self.value_type])\n )\n )\n return False\n\n return True", "def __allowed_values_correct_number(self):\n strTestName = 'Values of a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 0\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def a_list(test_val: object, test_col: object, valid_values: object) -> object:\n tv_upper = test_val.upper()\n rc: bool = True\n # noinspection PyTypeChecker\n value_list = [x[test_col] for x in valid_values]\n value_list_upper = [x.upper() for x in value_list]\n if tv_upper not in value_list_upper:\n print(f'{test_val} is invalid. 
Valid values are {str(value_list)}')\n rc = False\n return rc", "def validate_dict(types,val,allowed,typ):\n if not len(types): return TYPE_MISMATCH\n if str(type(val)) not in typ['list']: raise(Exception('unknown type'))\n for k,v in val.items():\n result=VALIDATORS[types[-1]](types[:-1],v,allowed,types[-1])\n if not result: return result\n return True", "def __allowed_values_inccorrect_number(self):\n strTestName = 'Values of a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 1.4\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])", "def isValidData(self, fzQuery):\n\n errStr = None\n res = False\n\n if fzQuery.int1 != None and fzQuery.int1 > 0 \\\n and fzQuery.int2 != None and fzQuery.int2 > 0 and fzQuery.int2 != fzQuery.int1 \\\n and fzQuery.mlimit != None and fzQuery.mlimit > 0 and fzQuery.mlimit > fzQuery.int1 and fzQuery.mlimit > fzQuery.int2 \\\n and fzQuery.str1 != None and len(fzQuery.str1) > 0 \\\n and fzQuery.str2 != None and len(fzQuery.str2) > 0:\n res = True\n elif fzQuery.int1 == None:\n errStr = \"int1 must be set\"\n elif fzQuery.int2 == None:\n errStr = \"int2 must be set\"\n elif fzQuery.mlimit == None:\n errStr = \"limit must be set\"\n elif fzQuery.int1 < 0: \n errStr = \"int1 must be greather than zero\"\n elif fzQuery.int2 < 0 : \n errStr = \"int2 must be greather than zero\"\n elif fzQuery.int2 == fzQuery.int1:\n errStr = \"int1 and int2 must not be equal\"\n elif fzQuery.mlimit < 0: \n errStr = \"limit must be greather than zero\"\n elif fzQuery.mlimit < fzQuery.int1 or fzQuery.mlimit < fzQuery.int2:\n errStr = \"limit must be greather than int1 and int2\"\n elif fzQuery.str1 == None or len(fzQuery.str1) == 0:\n errStr = \"str1 must be a valid string\"\n elif fzQuery.str2 == None or len(fzQuery.str2) == 0:\n errStr = \"str2 must be a valid string\"\n else :\n errStr = \"bad request, can't validate your query\" \n return res, errStr", "def check_unexpected_values(self, expected_values, scraped_values):\n\n\t\tfor key in scraped_values:\n\t\t\tself.assertIn(key, expected_values)", "def _validate_value(self, value):\n if self.limits[0] <= value <= self.limits[1]:\n return True\n else:\n return False", "def _check_value(item, allowed_values, item_name=None, extra=None):\n if item not in allowed_values:\n item_name = \"\" if item_name is None else \" '%s'\" % item_name\n extra = \"\" if extra is None else \" \" + extra\n msg = (\n \"Invalid value for the{item_name} parameter{extra}. 
\"\n \"{options}, but got {item!r} instead.\"\n )\n allowed_values = tuple(allowed_values) # e.g., if a dict was given\n if len(allowed_values) == 1:\n options = \"The only allowed value is %s\" % repr(allowed_values[0])\n elif len(allowed_values) == 2:\n options = \"Allowed values are %s and %s\" % (\n repr(allowed_values[0]),\n repr(allowed_values[1]),\n )\n else:\n options = \"Allowed values are \"\n options += \", \".join([f\"{repr(v)}\" for v in allowed_values[:-1]])\n options += f\", and {repr(allowed_values[-1])}\"\n raise ValueError(\n msg.format(\n item_name=item_name, extra=extra, options=options, item=item\n )\n )\n\n return item", "def check_measure_fields(self, values: Set[str], tol: float = 0.7) -> bool:\n counter: int = 0\n if type(values) is list:\n if len(values) == 0:\n sc.message(\"WARNING: NER MAPPER HAS A FIELD WITH NO VALUES.\")\n return False\n\n for value in values:\n tmp = value.split()\n if len(tmp) == 2 and tmp[0].isnumeric():\n counter += 1\n return (counter / len(values)) > tol\n else:\n return False", "def test_condition(c, flag_dict, unspecified_val):\n\n for ci in c:\n if ci.name not in flag_dict:\n if not unspecified_val:\n return False\n elif ci.enabled != flag_dict[ci.name]:\n return False\n return True", "def _check_pert(self, **kwargs):\n conditions = {\n 'mode >= low' : kwargs['mode'] >= kwargs['low'],\n 'high >= mode' : kwargs['high'] >= kwargs['mode'],\n }\n for condition_name, condition_value in conditions.items():\n if condition_value == False:\n err = 'Param \"{}\" fails PERT requirement \"{}\".'.format(kwargs, condition_name)\n raise FairException(err)", "def test_match_valid_data_val(self):\n f = lws.valid_data_val\n schema_val = ('some text', str, 'text')\n assert f(schema_val, 'text') is True\n schema_val = ('some number', float, 7.00)\n assert f(schema_val, 7) is False\n assert f(schema_val, 7.00) is True\n schema_val = ('True', bool, True)\n assert f(schema_val, True) is True\n assert f(schema_val, False) is False\n schema_val = ('even', int, lambda x: x % 2 == 0)\n assert f(schema_val, 2) is True\n assert f(schema_val, 257) is False", "def getIsValidParameters(self):\n isVolumValid = self.volume > 0\n if not isVolumValid:\n return isVolumValid, 'Invalid input volume'\n\n if not self.lowerLimitPressureInspiratory <= self.pressureInspiratory <= self.higherLimitPressureInspiratory:\n return False, f'Selected inspiratory value must be between {self.lowerLimitPressureInspiratory} cmH20 ' \\\n f'and {self.higherLimitPressureInspiratory} cmH2O.'\n\n if not self.lowerLimitPressureExpiratory <= self.pressureExpiratory <= self.higherLimitPressureExpiratory:\n return False, f'Selected expiratory value must be between {self.lowerLimitPressureExpiratory} cmH20 ' \\\n f'and {self.higherLimitPressureExpiratory} cmH2O.'\n\n return self._getIsValidParameters()", "def checkPrescribedValues(self,prescribedValues):\n\n for value in prescribedValues:\n if not float(int(value[0])) == value[0]:\n print(\"false\")\n return False\n else:\n return True", "def _check_valid_value(self, value):\n if self._possible_values is None: # validation not defined (profile)\n return\n if value in self._possible_values:\n return\n if value is not None and \"ANY\" in self._possible_values:\n return\n msg = (\"'%s' is not a valid 'options.%s' value.\\nPossible values are %s\"\n % (value, self._name, self._possible_values))\n raise ConanException(msg)", "def _check_params_do(name, val):\n if name == 'info_hash':\n return len(val) == 20\n elif name == 'peer_id':\n return len(val) == 
20 and STORAGE.check_peer(val)\n elif name == 'numwant':\n return int(val) < 250\n fail(REASON_REQUEST_ERROR)", "def type_restrictions(data):\n\n exam_type = [\"main\", \"MAIN\", \"CAT\", \"cat\"]\n if data not in exam_type:\n return False\n return True", "def check_value(self, name, min_int, max_int):\n while True:\n numb = input(f\"-- {name} : Entrez une valeur comprise \"\n f\"entre {min_int} et {max_int} : \")\n try:\n check = int(numb)\n if check == 99 or min_int <= check <= max_int:\n break\n except ValueError:\n pass\n return check", "def validate_data(values):\n try:\n [int(value) for value in values]\n if len(values) != 6:\n raise ValueError(\n f'Exactly 6 values are required - you provided {len(values)}'\n )\n except ValueError as e:\n print(f'Invalid data entered: {e}, please try again!\\n')\n return False\n\n return True", "def test_positive_value_exists(self):\n #######################################\n # Test for True\n value_to_test = 1\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = 100\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = 'hello'\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = True\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = {\n 'success': True\n }\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = [\n 'success'\n ]\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n #######################################\n # Test for False\n value_to_test = 0\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = -1\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = ''\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = '0'\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = False\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = {}\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = []\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))", "def constraint_clause_valid_values_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if isinstance(values, list):\n 
the_type = presentation._get_type(context)\n for value in values:\n coerce_value(context, presentation, the_type, None, None, value, field.name)", "def check_expected_values(self, expected_values, scraped_values):\n\n\t\tfor key in expected_values:\n\t\t\tself.assertIn(key, scraped_values)\n\t\t\tself.assertEqual(expected_values[key], scraped_values[key])", "def verify_settings(settings):\r\n\r\n if 'limit' not in settings:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit' option must be defined!\")\r\n return False\r\n\r\n if type(settings['limit']) != int:\r\n iridium_manager_tracer.warning(\"Settings: 'limit' must be an int!\")\r\n return False\r\n\r\n if 'limit_interval' not in settings:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' option must be defined!\")\r\n return False\r\n\r\n if type(settings['limit_interval']) != str:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' must be an str!\")\r\n return False\r\n\r\n # Force limit interval setting to always be lower case\r\n settings['limit_interval'] = settings['limit_interval'].lower()\r\n\r\n values = ''\r\n for item in TimeIntervals:\r\n if settings['limit_interval'] == item['name']:\r\n break\r\n values += item['name'] + ', '\r\n else:\r\n iridium_manager_tracer.warning(\"Settings: \" \\\r\n \"'limit_interval' must be one of the following: %s\", values)\r\n return False\r\n\r\n return True", "def validate_fields(self, window, values):\n \n #Check if record id is new\n is_valid = True\n problem_field_name = \"\"\n experiment_names = GUI.api.get_experiment_names()\n if values['record_id'] in experiment_names:\n is_valid = False\n problem_field_name = \"Record ID\"\n return is_valid, problem_field_name \n \n metadata = GUI.api.get_metadata()\n enbaled_fields = filter(lambda elem: (elem['form_name']=='material_information' or elem['form_name']=='printer_information') \n and not (isinstance(window[elem['field_name']], sg.Text) or window[elem['field_name']].Disabled), metadata)#only validate enbaled fields\n for field in enbaled_fields:\n validation = field['text_validation_type_or_show_slider_number']\n value = values[field['field_name']]\n if (validation == \"number\" and value.isdigit()):\n #check if correct ranges\n if field['text_validation_max'] != \"\":\n if value > field['text_validation_max']:\n is_valid = False \n problem_field_name = field['field_label']\n return is_valid, problem_field_name \n if field['text_validation_min'] != \"\":\n if value < field['text_validation_min']:\n is_valid = False \n problem_field_name = field['field_label']\n return is_valid, problem_field_name \n elif (validation == \"number\" and not value.isdigit()):\n is_valid = False\n problem_field_name = field['field_label']\n return is_valid, problem_field_name\n return is_valid, problem_field_name", "def validate_values(self):\n if self.avp_def.has_defined_values():\n defined_values = dict(self.avp_def.attr_defined_values)\n if self.avp_value not in defined_values.values():\n raise ValueError(\n f\"{self.avp_def.attr_name} - value {self.avp_value} \"\n \"is not allowed\")\n\n return True", "def _check_allowed_values(self, key: str, value: Any):\n allowedValues = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedValues\", None)\n if allowedValues is not None and value not in allowedValues:\n raise Exception(\n f\"Value '{value}' is not an allowed value for '{key}'. 
Allowed values are: {', '.join(allowedValues)}\"\n )", "def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")", "def test_if_keys_or_values_in_result_dict_are_int(self):\n for key, value in add_expressions(1, 2, 8)(2, 3).items():\n self.assertIsInstance(key, int)\n self.assertIsInstance(value, int)", "def only_choice(values):\n ## Used the provided solutions to be sure that my implementation of diagonals and\n ## Twins is ok\n for digit in '123456789':\n for unit in unitlist:\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n assign_value(values, dplaces[0], digit)\n return values", "def is_valid(val, val_type=\"key\"):\n if val_type == \"key\":\n if not isinstance(val, str):\n raise ValueError(f\"Key [{val}] must be of type str.\")\n return len(val) <= config.MAX_KEY_LEN\n elif val_type == \"value\":\n if isinstance(val, dict):\n return sys.getsizeof(val) <= config.MAX_VALUE_SIZE\n raise ValueError(f\"Value [{val}] must be of type dict.\")", "def validate_dict(data_dict, entity):\r\n fields = []\r\n for key, value in data_dict.items():\r\n if not value:\r\n fields.append(key)\r\n continue\r\n if len(fields) > 0:\r\n return provide_field_value(entity, fields)\r\n elif key == hqAddKey:\r\n status = validate_hqadd(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == logoUrlKey:\r\n status = validate_logourl(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == type_key:\r\n status = validate_officeType(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == name_key:\r\n status = None\r\n if entity == party_key:\r\n status = validate_partyname(value)\r\n elif entity == office_key:\r\n status = validate_officeName(value)\r\n if not status == ok_str:\r\n return status\r\n if fields:\r\n return provide_field_value(entity, fields)\r\n return ok_str", "def check_restrictions(restrictions, element, keys, verbose):\n params = OrderedDict(zip(keys, element))\n for restrict in restrictions:\n if not eval(replace_param_occurrences(restrict, params)):\n if verbose:\n print(\"skipping config\", get_instance_string(params), \"reason: config fails restriction\")\n return False\n return True", "def invalid(values):\n # for box in values.keys():\n # if len(values[box]) == 0:\n # return True\n # return False\n return len([box for box in values.keys() if len(values[box]) == 0]) != 0", "def ValidateValue(value, fielddef):\n validate = GetFieldDef(fielddef, fields='validate')\n\n if value == 0:\n # can not complete all validate condition\n # some Tasmota values are not allowed to be 0 on input\n # even though these values are set to 0 on Tasmota initial.\n # so we can't validate 0 values\n return True\n\n valid = True\n try:\n if isinstance(validate, str): # evaluate strings\n valid = eval(validate.replace('$','value'))\n elif callable(validate): # use as format function\n valid = validate(value)\n except:\n valid = False\n\n return valid", "def Validate(self, win):\n\n txtCtrl = self.GetWindow()\n val = txtCtrl.GetValue()\n isValid = False\n if val.isdigit():\n digit = int(val)\n if digit >= self._min and digit <= self._max:\n isValid = True\n if not isValid:\n # Notify the user of the invalid value\n msg = \"Value must be between %d and %d\" % \\\n (self._min, self._max)\n wx.MessageBox(msg,\n \"Invalid 
Value\",\n style=wx.OK|wx.ICON_ERROR)\n return isValid", "def only_choice(values):\n for unit in unitlist:\n for digit in '123456789':\n choices = [box for box in unit if digit in values[box]]\n if len(choices) == 1:\n values = assign_value(values, choices[0], digit)\n \n return values", "def _validate_value(self, val):\r\n if type(val) in (int, long, float, str, unicode, ):\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )", "def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n else:\n return True", "def _validate_ranges(self, concentrations: dict) -> None:\n\n for k, v in concentrations.items():\n for i in v:\n # since H+ is interpreted as a pH, we have to include all values\n if k in ('H+', 'pH'):\n continue\n if i <= 0:\n raise InvalidConcentrationError", "def _hasValuesChecker(entity, params):\n \n for key, values in constraints.iteritems():\n if entity.__getattribute__(key) not in values:\n return False\n\n return True", "def only_choice(values):\n\tfor unit in unitlist:\n\t\tfor digit in '123456789':\n\n\t\t\tmatches = []\n\t\t\t\n\t\t\tfor box in unit:\n\t\t\t\tif digit in values[box]:\n\t\t\t\t\tmatches.append(box)\n\t\t\t\t\n\t\t\tif len(matches) == 1:\n\t\t\t\tvalues = assign_value(values, matches[0], digit)\n\n\treturn values", "def check_solved(self, values):\n if values == None: #Forward_checking determines that values state is invalid -> set false, check if false here.\n return False\n\n for box in values.keys():\n if len(values[box]) != 1:\n return False\n return True", "def check_supported(check_val_list, valid_meas_dic):\r\r\n invalid_list = []\r\r\n\r\r\n for val in check_val_list:\r\r\n try:\r\r\n dummy = valid_meas_dic[val]\r\r\n except KeyError:\r\r\n invalid_list.append(val)\r\r\n\r\r\n if invalid_list:\r\r\n errMsg = (\"The following is unsupported %s\" %invalid_list)\r\r\n errMsg = errMsg + (\"\\nThe list of valid values is %s\" %valid_meas_dic.keys())\r\r\n raise ExGeneral(errMsg)", "def _check_parameters(self, target_function, **kwargs):\n # Ensure all arguments are =< 0 where relevant\n for keyword, value in kwargs.items():\n # Two conditions\n value_is_less_than_zero = value < 0\n keyword_is_relevant = keyword in ['mean', 'constant', 'low', 'mode', 'high']\n # Test conditions\n if keyword_is_relevant and value_is_less_than_zero:\n raise FairException('\"{}\" is less than zero.'.format(keyword))\n # Check that all required keywords are provided\n required_keywords = self._required_keywords[target_function]\n for required_keyword in required_keywords:\n if required_keyword in kwargs.keys():\n pass\n else:\n raise FairException('\"{}\" is missing \"{}\".'.format(str(target_function), required_keyword))", "def test_invalid_request_values(self):\n TEST_DATA = [\n (-100, 0, 0, 0),\n (100, 0, 0, 0),\n (0, -190, 0, 0),\n (0, 190, 0, 0),\n (0, 0, 0, -10),\n (0, 0, 0, 370)\n ] # yapf: disable\n for (lat, lon, alt, heading) in TEST_DATA:\n self.assertEqual(400,\n self.eval_request_values(lat, lon, alt, heading))", "def _validate_values(self, sample):\n result = True\n paths = []\n #Search vor necessary paths accorduing to comparison_style\n if self._comparison_style == ComparisonStyle.minimum:\n paths = self._find_all_paths(self._reference)\n else:\n paths = self._find_all_paths(sample)\n # For every 
path, if it is endling in an key, validate the key\n for path in paths:\n reference_value = MappingValidator._get_value(self._reference,\n list(path))\n mapping_value = MappingValidator._get_value(sample, list(path))\n if isinstance(mapping_value, abc.Mapping):\n continue\n elif isinstance(reference_value, type):\n result = result and isinstance(mapping_value, reference_value)\n elif callable(reference_value):\n result = result and bool(reference_value(mapping_value))\n elif isinstance(reference_value, re._pattern_type):\n result = result and bool(reference_value.match(mapping_value))\n elif isinstance(reference_value, list):\n list_contains_sample_val = False\n for possibility in reference_value:\n if possibility == mapping_value:\n list_contains_sample_val = True\n break\n result = result and list_contains_sample_val\n elif reference_value is Ellipsis:\n result = result and True\n else:\n result = result and False\n if not result:\n break\n return result", "def verify_rpc_value ( user_dict ):\n for key in user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )", "def test_generating(resp):\n errors = []\n if not check_int(resp[\"tightness\"]):\n errors.append(\"Invalid type for Itinerary response's 'tightness' field.\")\n\n if not isinstance(resp, bool):\n errors.append(\"Invalid type for Itinerary response's 'start_from_airport' field.\")", "def is_value_legit(self, value):\n return value in self.domain and value in self.possible_domain", "def get_valid_values_map(self, condition=True):\n tpninfos = self.locate.get_all_tpninfos(self.instrument, self.filekind, \"ld_tpn\")\n required_keys = self.get_required_parkeys()\n valid_values = {}\n for info in tpninfos:\n if info.is_complex_constraint:\n continue\n if info.name in required_keys:\n values = info.values\n if len(values) == 1 and \":\" in values[0]:\n limits = values[0].split(\":\")\n try:\n limits = [int(float(x)) for x in limits]\n except Exception:\n pass\n # sys.exc_clear()\n else:\n values = list(range(limits[0], limits[1]+1))\n if condition:\n values = tuple([utils.condition_value(val) for val in values])\n valid_values[info.name] = values\n return valid_values", "def check_parameters(self):\n\n if self.process not in [\"Like\", \"Like-and-follow\"]:\n raiser('process')\n\n if \"type\" not in self.duration or \"value\" not in self.duration:\n raiser('duration(type or value)')\n else:\n typ = self.duration['type']\n val = self.duration['value']\n if self.process == \"Like\":\n if typ not in ['by_time', 'by_likes']:\n raiser('type')\n\n if \"like\" not in self.limits_per_hour:\n raiser('limitsPerHour(like)')\n else:\n try:\n self.limits_per_hour['like'] = float(self.limits_per_hour['like'])\n except ValueError:\n raiser('like')\n elif self.process == \"Like-and-follow\":\n if typ not in ['by_time', 'by_users']:\n raiser('type')\n\n if \"like\" not in self.limits_per_hour or \"follow\" not in self.limits_per_hour \\\n or \"unfollow\" not in self.limits_per_hour:\n raiser('limitsPerHour(like or follow or unfollow)')\n else:\n for i in [\"like\", \"follow\", \"unfollow\"]:\n try:\n self.limits_per_hour[i] = float(self.limits_per_hour[i])\n except ValueError:\n raiser(i)\n try:\n self.duration['value'] = float(val)\n except ValueError:\n raiser('value')\n\n if not isinstance(self.search_hashtags, list):\n raiser('hashtags')\n\n if not isinstance(self.white_list, list):\n raiser('whiteList')", "def 
checkallflags(flags_with_values,flags_withoutvalues,cldic):\r\n if len(set(flags_with_values).intersection(set(flags_without_values))) > 0:\r\n print ( \"error some flags appear in two lists of flags, with and without required values:\",set(flags_with_values).intersection(set(flags_without_values)))\r\n printcommandset()\r\n sys.exit(1)\r\n for flag in set(flags_with_values).union(set(flags_withoutvalues)):\r\n if flag not in cldic:\r\n print ( \"error some flag mismatch between strings of flags and dictionary of flags:\",flag)\r\n printcommandset()\r\n sys.exit(1)\r\n return", "def check(options, rules = rules):\n s = [\"str\", \"unicode\"]\n for key in options:\n if not key.endswith(\" comment\"):\n if key in rules:\n c = rules[key]\n else:\n raise OptionKeyError(key)\n value = options[key]\n if c[0] == \"U\": continue\n elif c[0] == \"POT\":\n if not(((value & (value - 1)) == 0) and value):\n raise OptionPOTError(key)\n elif c[0] == \"R\":\n if value not in list(range(c[1], c[2]+1)):\n raise OptionRangeError(key, c[1], c[2]+1)\n elif c[0] == \"B\":\n if value not in list(range(0, 2)):\n #print (value)\n raise OptionRangeError(key, 0, 2)\n elif c[0] == \"N1+\":\n if value < 1:\n raise OptionRangeError(key, 1, float(\"inf\"))\n elif c[0] == \"N0+\":\n if value < 0:\n raise OptionRangeError(key, 0, float(\"inf\"))\n elif c[0] == \"FN0+\":\n if value < 0:\n raise OptionRangeError(key, 0, float(\"inf\"))\n elif c[0] == \"N-1+\":\n if value < -1:\n raise OptionRangeError(key, -1, float(\"inf\"))\n elif c[0] == \"S\":\n if value.__class__.__name__ not in s:\n raise OptionTypeError(key, \"text\")\n elif c[0] == \"Name\":check_name(value,key)\n\n elif c[0] == \"L\":\n if value.__class__.__name__ != \"list\":\n raise OptionTypeError(key, \"list\")\n\n elif c[0] == \"C\":\n if len(value) != 3:\n raise OptionError()\n if sum(value) < 1:\n raise OptionError()\n else:\n raise Exception(\"%s not valid rule type from %s\" % (c[0], key))", "def is_valid_value(self, value):\n return value in self.values", "def value_type_check(table_rows):\n types = table_rows[0].types\n rst = True\n lst = []\n row_num = 0\n for row in table_rows:\n for i in range(0, len(row.values)):\n data_type = types[i].strip().upper()\n value = row.values[i].strip()\n if(data_type == \"INT\"):\n if(value != \"\" and _is_int(value) == False):\n rst = False\n lst.append(\"(col:{0},row:{1},value:{2})\".format(\n i, row_num, row.values[i]\n ))\n\n elif(data_type == \"FLOAT\"):\n if(value != \"\" and _is_float(value) == False):\n rst = False\n lst.append(\"(col:{0},row:{1},value:{2})\".format(\n i, row_num, row.values[i]\n ))\n row_num += 1\n return rst,\",\".join(lst)", "def value_error(number):\n try:\n nbr = int(number)\n except ValueError:\n print(\"You can't sum letters, please write a number\")\n verification = False\n else:\n verification = True\n return verification", "def test_when_condition_in_target_string_or_array(if_statement_validator):\n test = {\n 'condition': 'in',\n 'target': 123,\n 'then': 'bob',\n }\n validate_result = if_statement_validator(test)\n\n assert not is_successful(validate_result)\n assert \"not of type 'string', 'array'\" in str(validate_result.failure())", "def test_bad_value_type(self):\n\n print 'Patience, this may take 20 seconds'\n request = service.get_request('POST', {u'species': u'Nosuchtaxonia mistakea'})\n x = self.start_request_tests(request)\n self.assertTrue(x.status_code % 100 == 4, x.status_code)\n json.dump(x.to_dict(), sys.stdout, indent=2)\n # TBD: Change this to a *correct* check for 
message informativeness.\n m = x.json().get(u'message')\n self.assertTrue(u'species' in m, #informative?\n 'no \"species\" in \"%s\"' % m)", "def are_numeric(*values):\n\n for value in values:\n if not is_numeric(value):\n return False\n return True", "def solved(values):\n # for box in values.keys():\n # if len(values[box]) != 1:\n # return False\n # return True\n return len([box for box in values.keys() if len(values[box]) != 1]) == 0", "def testConditionReasons(self):\n \n state = State.from_problem(self.prob)\n\n relevantVars = []\n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition, relevantVars))\n\n relevantVars = set(relevantVars)\n \n s1 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"pos1\"]])\n s2 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"apt1\"]])\n s3 = StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"tru1\"]])\n \n self.assertEqual(len(relevantVars), 3)\n self.assert_(s1 in relevantVars)\n self.assert_(s2 in relevantVars)\n self.assert_(s3 in relevantVars)", "def check_emission_factors(cls, values):\n for v in values.values():\n if isinstance(v, list):\n assert len(v) > 0, \"Emission factors must not be an empty list\"\n return values", "def validate(self, value: Any, low: int, high: int) -> bool:\n pass", "def _ValueMismatch(how_much):\n return 'Values mismatch, %s' % how_much", "def _check_value(self, value, name, check_function):\n if check_function is not None:\n is_good = check_function(value) #May raise an exception\n assert is_good in [0,1,True,False]\n if not is_good:\n raise ValueError(\"Invalid parameter value %r for parameter %s\" \\\n % (value, name))", "def _getIsValidParameters(self):\n return True, ''", "def is_valid(self):\n\n # Test whether every element in required_keys is in actual_keys\n actual_keys = set(self.fields.keys())\n required_keys = set(self.required_keys)\n has_required_keys = required_keys <= actual_keys\n if not has_required_keys:\n return False\n\n # TODO: Complete the following block. 
\n\n # Assume all is valid at first, then as soon as one invalid\n # is detected, whole thing becomes invalid.\n all_valid = True \n\n # Now iterate over each key-value pair to check\n for key, value in self.fields.items():\n if key == 'byr':\n this_key_valid = len(str(value)) == 4 and (1920 <= value <= 2002)\n all_valid = all_valid and this_key_valid\n if key == 'iyr':\n this_key_valid = len(str(value)) == 4 and (2010 <= value <= 2020)\n all_valid = all_valid and this_key_valid\n if key == 'eyr':\n this_key_valid = len(str(value)) == 4 and (2020 <= value <= 2030)\n all_valid = all_valid and this_key_valid\n if key == 'hgt':\n if len(str(value)) < 4:\n all_valid = False\n else:\n ending = value[-2:]\n num = int(value[:-2])\n this_key_valid = (ending == 'in' and (59 <= num <= 76)) or (ending == 'cm' and (150 <= num <= 193))\n all_valid = all_valid and this_key_valid\n if key == 'hcl':\n re_str = '#[0-9a-f]{6}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 7\n all_valid = all_valid and this_key_valid\n if key == 'ecl':\n this_key_valid = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n all_valid = all_valid and this_key_valid\n if key == 'pid':\n re_str = '[0-9]{9}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 9\n all_valid = all_valid and this_key_valid\n if key == 'cid':\n this_key_valid = True\n all_valid = all_valid and this_key_valid\n\n # If all fields are valid, return True\n return all_valid", "def __check_datatype(event, resp):\n\n def unsigned_int_check(val):\n \"\"\"checks if something is a positive (unsigned) integer\n\n Arguments:\n val (object): value to check\n\n Returns:\n (bool): True if val is an integer, otherwise False\n \n \"\"\"\n\n is_valid_number = True\n try:\n val = int(val)\n if val < 0:\n raise ValueError(\"Not an unsigned int\")\n except ValueError as e:\n is_valid_number = False\n \n return is_valid_number\n \n # for start and end base, if not None, perform the number check\n # if either is not an unsigned int, this is a BAD REQUEST\n vals = [resp.get_datum(\"start\"), resp.get_datum(\"end\")]\n for val in vals:\n if val:\n if not unsigned_int_check(val):\n resp.set_status_code(SC.BAD_REQUEST)\n resp.set_body(json.dumps({\n \"message\": \"start/end must be unsigned int\"\n }))", "def validateMarketOrder(self, orderDict):\n try:\n if (orderDict['amount'] == 0 or (orderDict['min'] == 0 and orderDict['max'] == 0)):\n return 'You must place an order with a min/max and amount > 0'\n return 1\n except:\n return 'galaxy->validateMarketOrder error'", "def test_failing_if(self):\n\n bad_examples = \"\"\"\nif(department, score) ->\nThis should be a boolean column or expression\n\nif(department, score)\n ^\n===\nif(department = 2, score) ->\nCan't compare str to num\n\nif(department = 2, score)\n ^\n===\nif(department = \"1\", score, department, score*2) ->\nThis should be a boolean column or expression\n\nif(department = \"1\", score, department, score*2)\n ^\n===\nif(department = \"1\", score, valid_score, score*2, department, 12.5) ->\nThis should be a boolean column or expression\n\nif(department = \"1\", score, valid_score, score*2, department, 12.5)\n ^\n===\nif(department, score, valid_score, score*2) ->\nThis should be a boolean column or expression\n\nif(department, score, valid_score, score*2)\n ^\n===\nif(department = \"foo\", score, valid_score, department) ->\nThe values in this if statement must be the same type, not num and str\n\nif(department = \"foo\", score, 
valid_score, department)\n ^\n===\nif(department = \"foo\", department, valid_score, score) ->\nThe values in this if statement must be the same type, not str and num\n\nif(department = \"foo\", department, valid_score, score)\n ^\n\"\"\"\n\n for field, expected_error in self.bad_examples(bad_examples):\n with self.assertRaises(Exception) as e:\n self.builder.parse(field, forbid_aggregation=True, debug=True)\n if str(e.exception).strip() != expected_error.strip():\n print(\"===actual===\")\n print(e.exception)\n print(\"===expected===\")\n print(expected_error)\n print(\"===\" * 10)\n self.assertEqual(str(e.exception).strip(), expected_error.strip())", "def clean_value(self, value):\n if isinstance(value, str):\n return value.lower() in ('1', 'true')\n\n return value in (1, True)", "def checkCondition(self, left_context, mod, right_context):\n if self.condition == \"\":\n return(True)\n else:\n if self.ruleType == self.TYPE_OL:\n keys = self.symParam\n values = mod.param \n elif self.ruleType == self.TYPE_L1L:\n keys = self.left_context.param + self.symParam\n values = left_context.param + mod.param \n elif self.ruleType == self.TYPE_R1L:\n keys = self.symParam + self.right_context.param\n values = mod.param + right_context.param\n elif self.ruleType == self.TYPE_2L:\n keys = self.left_context.param + self.symParam + self.right_context.param\n values = left_context.param + mod.param + right_context.param\n new_dict = dict(zip(keys, values)) \n return(self.condition.evaluate(new_dict))", "def is_valid_row_or_col(val: str):\n try:\n val = int(val)\n if 1 <= val <= 10:\n return True\n return False\n except (ValueError, TypeError):\n return False", "def get_iv_description(value):\n\n return {\n value is None: '',\n value < 0.02: 'useless',\n 0.02 <= value < 0.1: 'weak',\n 0.1 <= value < 0.3: 'medium',\n 0.3 <= value < 0.5: 'strong',\n 0.5 <= value: 'excellent'\n }[True]" ]
[ "0.67283416", "0.66373456", "0.63170063", "0.6068614", "0.60020655", "0.5946913", "0.57890636", "0.5788082", "0.57537967", "0.57021195", "0.56713057", "0.5626302", "0.5622262", "0.5609995", "0.5586753", "0.55860853", "0.5585117", "0.5569001", "0.5539537", "0.5532167", "0.5508965", "0.5501328", "0.5495712", "0.54921186", "0.54814786", "0.5481348", "0.5475239", "0.5473147", "0.54602677", "0.5422842", "0.5400978", "0.5395609", "0.53844374", "0.5374956", "0.5354288", "0.5338592", "0.53236556", "0.53168213", "0.5309571", "0.5303233", "0.5289085", "0.52818495", "0.52683", "0.5261505", "0.5249501", "0.5235212", "0.5230874", "0.522402", "0.52238166", "0.52204907", "0.5218934", "0.5214644", "0.52105063", "0.52091616", "0.5194364", "0.5192307", "0.5185708", "0.5183681", "0.5179509", "0.51653373", "0.5156075", "0.51557297", "0.51556873", "0.515567", "0.51523995", "0.51517946", "0.51500124", "0.5145614", "0.5144776", "0.51395315", "0.51357806", "0.5135373", "0.51331615", "0.5128345", "0.5123813", "0.51169676", "0.5116759", "0.5115426", "0.5114199", "0.5097462", "0.5092845", "0.508229", "0.506326", "0.5062961", "0.5061274", "0.50477153", "0.5046707", "0.50453734", "0.50436383", "0.50415695", "0.5040849", "0.50385356", "0.5037546", "0.50373715", "0.503704", "0.5027222", "0.502498", "0.50223213", "0.5017193", "0.501122" ]
0.76134247
0
Perform a shallow copy
def copy(self) -> 'Rune': return self.__copy__()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def copy(self):", "def _copy_(self):\n return copy.copy(self)", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def __copy__(self):\n return self.copy()", "def copy(self):\n from copy import deepcopy\n return deepcopy(self)", "def __copy__(self, *args, **kwargs):\n return self.copy()", "def deepcopy(self):\n return copymod.deepcopy(self)", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\r\n return copy.deepcopy(self)", "def deepcopy(self):\n return self.copy()", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy (self):\n import copy\n return copy.copy(self)", "def deepcopy(self):\n return copy.deepcopy(self)", "def copy(self):\r\n return copy.copy(self)", "def clone(self):\n return shallow_clone(self)", "def copy(self):\n return self.mutate().simple_copy()", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n \n return deepcopy(self)", "def copy(self):\n cpy = deepcopy(self)\n # usually we use copy to perform transformations on the board\n # so it's good to reset memoized values\n cpy._memoized_compact = None \n return cpy", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def __copy__(self):\n raise NotImplementedError", "def copy(self):\n return copy(self)", "def copy(self):\n return copy(self)", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def test_deepcopy(self):\n t = Identity()\n t.transform([2])\n copy.deepcopy(t)", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", "def __copy__(self):\n return self.__class__(self.m, self.n, self.data)", "def test_copy(self):\n data = [[0, 1], [1, 0]]\n b1 = Board(data)\n b2 = b1.copy()\n # test if proper copy\n self.assertListEqual(b1.data, b2.data)\n # teset 
if not just a shallow copy\n b1.data[0][0] = 1\n self.assertNotEqual(b1.data[0][0], b2.data[0][0])", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def copy(self):\n try:\n return self.__class__(self, copy=True)\n except TypeError:\n new = self.__class__(copy.deepcopy(self))\n return new", "def copy(self, deep=False):\n return _(copy.deepcopy(self._) if deep else copy.copy(self._))", "def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def copy (self):\n return self.__class__(self.name, self[:])", "def clone(self) -> Any:\n return cp.copy(self)", "def test_deepcopy(self):\n t = Precision()\n t.transform([2])\n copy.deepcopy(t)", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def copy(self):\n new = self\n return new", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n\n return deepcopy(self)", "def _shallow_clone_dataset(self: TAvalancheDataset) -> TAvalancheDataset:\n dataset_copy = copy.copy(self)\n dataset_copy._flat_data = self._flat_data._shallow_clone_dataset()\n return dataset_copy", "def clone(self):\n return copy.deepcopy(self)", "def copy(self):\n return super().copy()", "def clone(self):", "def copy(self):\n copy = self.__class__()\n copy.a = self.a\n copy.b = self.b\n copy.peak = self.peak\n copy.orientation = self.orientation\n copy.i = self.i\n copy.coords = self.coords.copy()\n return copy", "def __deepcopy__(self, memodict=None):\n return self.copy()", "def copy(self):\n return self.from_builder(self)", "def copy(self):\n return self.__class__(*self.sets)", "def test_deepcopy(self):\n t = Quantize()\n t.transform([2])\n copy.deepcopy(t)", "def copy(self):\n return self.__class__(dict(self))", "def copy (self, **kwargs):\n out = copy.deepcopy (self)\n out.update (**kwargs)\n return out", "def _shallow_clone_dataset(self: TDataWTransform) -> TDataWTransform:\n dataset_copy = copy.copy(self)\n dataset_copy._transform_groups = copy.copy(dataset_copy._transform_groups)\n dataset_copy._frozen_transform_groups = copy.copy(\n dataset_copy._frozen_transform_groups\n )\n return dataset_copy", "def copy(\n self\n ) -> AssignationProxy:\n assign = self.assignation\n copied = copy.copy(assign)\n copied.obj = copy.deepcopy(assign.obj)\n copied.range_obj = copy.deepcopy(assign.range_obj)\n copied.workshift_proxy = assign.workshift_proxy\n copied.person = assign.person\n copied.obj.id = None\n return copied", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied" ]
[ "0.87317777", "0.80810803", "0.79437435", "0.79085433", "0.7848419", "0.78046805", "0.7762016", "0.7748258", "0.77443546", "0.774237", "0.774237", "0.774237", "0.772365", "0.7720509", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.76456845", "0.7644182", "0.76388586", "0.7593062", "0.75823355", "0.7567763", "0.75669354", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7564859", "0.7510276", "0.7510121", "0.74852467", "0.74852467", "0.74852467", "0.7459295", "0.7454319", "0.7454319", "0.7435544", "0.7435544", "0.7435544", "0.7435544", "0.74281454", "0.7423686", "0.74152994", "0.7402962", "0.73893356", "0.73893356", "0.73893356", "0.73893356", "0.73881274", "0.738116", "0.7377632", "0.73598486", "0.7349913", "0.7344147", "0.7324276", "0.7324276", "0.73130995", "0.7306809", "0.73029613", "0.73029613", "0.73029613", "0.7283483", "0.72808087", "0.7273924", "0.72645295", "0.724912", "0.72456664", "0.7237821", "0.7230071", "0.722926", "0.72231585", "0.7212988", "0.7204005", "0.71956354", "0.7187188", "0.7187188", "0.7187188", "0.7161745" ]
0.0
-1
sha256.sha256 doesn't implement pickle
def __deepcopy__(self, memo=None) -> 'Rune':
    return Rune(self.shaobj.state[0], copy.deepcopy(self.restrictions))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SHA256(self) -> _n_0_t_3[_n_0_t_9]:", "def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()", "def pickle_and_hash(obj: Any) -> str:\n try:\n s = dill.dumps(obj)\n except:\n raise UnpickleableError()\n\n return hashlib.sha512(s).hexdigest()", "def object_sha256(obj):\n\n return hashlib.sha256(json.dumps(obj).encode()).hexdigest()", "def hash(self) -> bytes:", "def default_sha256(key: KeyT, *args, **kwargs) -> bytes:\n return sha256(key).digest() # type: ignore", "def sha256(self):\n return self._sha256", "def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()", "def sha256(self):\n return sha256file(self.abspath)", "def create_hash(*args):\n challenge_str = jsonpickle.encode(args)\n challenge_hash = hashlib.sha256(challenge_str.encode())\n return Bn.from_binary(challenge_hash.digest())", "def sha256(ctx, salt=\"\"):\n if ctx.data:\n salted_input_value = salt + \":\" + ctx.data\n ctx.data = hashlib.sha256(salted_input_value.encode()).hexdigest()\n else:\n raise RefError(\n \"Ref error: eval_func: nothing to sha256 hash; try \" \"something like '|random:str|sha256'\"\n )", "def hashable(obj):\n return bytes(str(obj), \"utf-8\")", "def sha256(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)\n d.update(data)\n return d.digest()", "def sha256(self):\n return self.sha256checksums()", "def sha256(value):\n return hashlib.sha256(value).hexdigest()", "def hash(self) -> str:\r\n ...", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def get_data_hash(args):\n pass", "def Sha256(data: Union[bytes, str]) -> bytes:\n return hashlib.sha256(AlgoUtils.Encode(data)).digest()", "def sha256(message: bytes):\n # convert message bitarray\n bit_msg = bitarray(endian='big')\n bit_msg.frombytes(message)\n L = len(bit_msg)\n\n # additions done mod 2^32\n pow2 = pow(2,32)\n\n # append 1 followed by K 0s where K is the minimum number >= 0 such that \n # len(bit_msg) + 1 + K + 64 is a multiple of 512\n bit_msg = bit_msg + bitarray('1') + (bitarray('0') * ((-L-65) % 512))\n # append len(bit_msg) as a 64-bit int to bit_msg\n bit_msg = bit_msg + util.int2ba(L, length=64, endian='big')\n\n # initialize hash to predefined values\n current_hash = [h for h in initial_hash]\n\n # operate on each 512-bit chunk\n for chunk_index in range(len(bit_msg)//512):\n chunk = bit_msg[chunk_index * 512 : (chunk_index+1) * 512]\n # w is array of 64 32-bit words with first 16 equal to chunk\n w = [chunk[i*32 : (i+1)*32] for i in range(16)]\n w.extend([bitarray(32) for _ in range(48)])\n # create last 48 words in w from first 16\n for i in range(16, 64):\n s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ rightshift(w[i-15], 3)\n s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ rightshift(w[i-2], 10)\n w[i] = int2ba32(sum(map(util.ba2int, [w[i-16], s0, w[i-7], s1])) % pow2)\n\n # copy current hash (stored in hex) into working list v as bitarrays\n v = list(map(int2ba32, current_hash))\n # compression\n for i in range(64):\n S1 = rightrotate(v[4], 6) ^ rightrotate(v[4], 11) ^ rightrotate(v[4], 25)\n ch = (v[4] & v[5]) ^ ((~v[4]) & v[6])\n temp1 = (constants[i] + sum(map(util.ba2int, [v[7], S1, ch, w[i]]))) % pow2\n S0 = rightrotate(v[0], 2) ^ rightrotate(v[0], 13) ^ rightrotate(v[0], 22)\n maj = (v[0] & v[1]) ^ (v[0] & v[2]) ^ (v[1] 
& v[2])\n temp2 = (util.ba2int(S0) + util.ba2int(maj)) % pow2\n\n # shift elements of v by 1\n for j in reversed(range(1, len(v))):\n v[j] = v[j-1]\n v[0] = int2ba32((temp1 + temp2) % pow2)\n v[4] = int2ba32((util.ba2int(v[4]) + temp1) % pow2)\n\n # add compressed values (which are bitarrays) to current_hash (which are ints)\n current_hash = list(map(lambda a,b: (a + util.ba2int(b)) % pow2, current_hash, v))\n\n # each entry of current_hash is a 32-bit integer so convert to 4 bytes \n # adding bytes appends them\n return b''.join(x.to_bytes(4, 'big') for x in current_hash)", "def sha256(content):\n content = content.encode('utf-8')\n return hashlib.sha256(content).hexdigest()", "def sha3_256(x):\n return hashlib.sha3_256(x).digest()", "def _Hash(content: bytes) -> str:\n return hashlib.sha256(content).hexdigest()", "def deep_hash(obj):\n pass", "def hash_key(self):", "def _sha256(sha256):\n if not sha256:\n sha256 = \"0\" * 64\n\n return sha256", "def hash_block(self):\n # TODO : Refactor the algorithm and improve it. This method only does basic things\n block_string = pickle.dumps(self)\n block_hash = hashlib.sha3_256(block_string).digest()\n # The above lines converts the object into __str__() representation and hashes it using sha3_256 algorithm.\n return block_hash", "def test_hash_sha256(self):\n block = self.blockchain.new_block(self.proof, self.previous_hash)\n hash_ = self.blockchain.hash(block)\n\n self.assertIsInstance(hash_, str)\n self.assertEqual(hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest(), hash_)", "def sha3_256(data=None):\n return SpongeHash(512, 256, data, \"SHA3-256\", KeccakSponge, PAD_SHA3)", "def __hash__(self):\n return hash(tuple(self.sig))", "def pickle(obj):\n return pickletools.optimize(cPickle.dumps(obj))", "def __hash__(self):\n return hash(self.hash)", "def hash(self):\n raise NotImplementedError() # To be subclassed", "def get_hash(self):\r\n return", "def get_hash(self):\n return self.__hash", "def sha256(self, sha256):\n\n self._sha256 = sha256", "def regist_hash(cobj, hash, handler, dir):\n pass", "def sha256(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha256\")", "def sha256(s: str) -> str:\n return hashlib.sha256(s.encode()).hexdigest()", "def hashcode(o):", "def __Hash(self):\n return self._Hash()", "def hash_sbox(f):\n hf = sha256()\n for x in f:\n hf.update(hex(x).encode('utf-8'))\n return hf.hexdigest()", "def current_hash(self):", "def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()", "def generate_sha256_hash(fpath, sig_key=None):\n return run(fpath, sig_key)", "def soft_hash(p):\n return tuple(map(r_soft_hash, p))", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def __init__(self, seed=None):\n self.seed(seed)\n self.hashfun = \"SHA-256\"\n self._basehash()", "def sha256(cls, value):\n assert type(value) is str\n return int(sha256(value.encode()).hexdigest(), 16)", "def HashAlgorithm(self) -> _n_7_t_0:", "def test__pickle_unpickle(self):\n pass", "def get_partial_sha256(self, nbytes):\n return sha256file(abspath=self.abspath, nbytes=nbytes)", "def existing_hash(self, id):\r\n return self._read_sha_by_id(id)", "def sha256(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sha256\")", "def get_results_hash(self, data):\n data = json.dumps(data, sort_keys=True)\n result = hashlib.sha512(data.encode())\n result_hash = result.hexdigest()\n return result_hash", "def hash(self) -> types.UInt256:\n with 
serialization.BinaryWriter() as bw:\n bw.write_uint32(settings.network.magic)\n self.serialize_unsigned(bw)\n data_to_hash = bytearray(bw._stream.getvalue())\n data = hashlib.sha256(hashlib.sha256(data_to_hash).digest()).digest()\n return types.UInt256(data=data)", "def stop_hash(cobj, hash):\n pass", "def get_file_sha256(fname):\n with open(fname, 'rb') as afile:\n return base64.b64encode(get_file_hash(afile, hashlib.sha256()))", "def test_default_sha256_bytes(self):\n this_is_a_test = [\n 10244166640140130606,\n 5650905005272240665,\n 14215057275609328422,\n 5952353080197385534,\n 4990779931033217093,\n ]\n this_is_also = [\n 4140421647067018332,\n 9306548247555387104,\n 5672713771950536751,\n 8501641957786831066,\n 15146689942378126332,\n ]\n hashes = default_sha256(b\"this is a test\", 5)\n self.assertEqual(hashes, this_is_a_test)\n hashes = default_sha256(b\"this is also a test\", 5)\n self.assertEqual(hashes, this_is_also)", "def sha256(path: Union[Path, str]) -> str:\n # The file shouldn't be too big to load into memory, so be lazy.\n with open(path, 'rb') as fp:\n data = fp.read()\n m = hashlib.sha256()\n m.update(data)\n return m.hexdigest()", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def __hash__( self ):\n return hash( self.data )", "def sha_init(self):\n pass", "def GetHashKey(self, key):\r\n data = pickle.dumps(key)\r\n hashObject = hashlib.sha1(data)\r\n hashValue = hashObject.hexdigest()\r\n value = int(hashValue, 16)\r\n return value", "def __hash__(self):\n hash(self.components)", "def __hash__(self):\r\n return hash(f'{self.job_id},{self.job_size},{self.priority}')", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def _fix_sha256(self):\n logger.info('fix package hashes: started')\n\n self._incomplete_entries = [entry for key, entry in self.walk() if entry.hash is None]\n\n physical_keys = []\n sizes = []\n for entry in self._incomplete_entries:\n physical_keys.append(entry.physical_key)\n sizes.append(entry.size)\n\n results = calculate_sha256(physical_keys, sizes)\n exc = None\n for entry, obj_hash in zip(self._incomplete_entries, results):\n if isinstance(obj_hash, Exception):\n exc = obj_hash\n else:\n entry.hash = dict(type='SHA256', value=obj_hash)\n if exc:\n incomplete_manifest_path = self._dump_manifest_to_scratch()\n msg = \"Unable to reach S3 for some hash values. 
Incomplete manifest saved to {path}.\"\n raise PackageException(msg.format(path=incomplete_manifest_path)) from exc\n\n logger.info('fix package hashes: finished')", "def get_hash(self):\n return freeze_dict(self.get_hash_params())", "def __hash__(self) -> int:\n ...", "def __hash__(self) -> int:\n ...", "def hashkey(obj, salt=0):\n if isinstance(obj, str):\n return zlib.adler32(obj.encode(), salt) & 0xffffffff\n elif isinstance(obj, bytes):\n return zlib.adler32(obj, salt) & 0xffffffff\n elif isinstance(obj, datetime_type):\n return zlib.adler32(str(obj).encode(), salt) & 0xffffffff\n return hash(obj) & 0xffffffff", "def __hash__(self):\n raise NotImplementedError", "def hash_of_file(path):\n with open(path, 'rb') as archive:\n sha = sha256()\n while True:\n data = archive.read(2 ** 20)\n if not data:\n break\n sha.update(data)\n return encoded_hash(sha)", "def get_sha256_hash(key, size=None):\n partition_hash = hashlib.sha256()\n for part in key:\n partition_hash.update(str(part).encode('utf-8'))\n sha256_hash = partition_hash.hexdigest()\n if not size or size > len(sha256_hash):\n size = len(sha256_hash)\n return sha256_hash[:size]", "def test_can_pickle(self):\n settings = UploadSettings(None, FakeDataServiceApi(), None, ProjectNameOrId.create_from_name('mouse'), None)\n params = ('one', 'two', 'three')\n context = UploadContext(settings, params, multiprocessing.Manager().Queue(), 12)\n pickle.dumps(context)", "def decode_minhash(buf: str) -> LeanMinHash:\n return pickle.loads(base64.b64decode(buf.encode(\"utf-8\")))", "def b32hash(s):\n _hash = hashlib.sha256()\n _hash.update(str2bytes(s))\n return bytes2str(b32encode(_hash.digest()))", "def __reduce_ex__(self, protocol):\n return (_safe_pickle_load, (self.__module__, self.__class__.__name__, self.name))", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def hash_file_sha256(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.sha256, binary=binary, buffer_size=buffer_size)", "def get_sha256_file(filename):\n BLOCKSIZE = 65536\n hasher = hashlib.sha256()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return hasher.hexdigest()", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def shake256(data=None, digest_size=512):\n return SpongeHash(512, digest_size, data, \"SHAKE256\", KeccakSponge, PAD_SHAKE)", "def hash(password):\n return sha256_crypt.encrypt(password)", "def hash256_result(func):\n\n @wraps(func)\n def _decorator(*args, **kwargs):\n val = func(*args, **kwargs)\n if val is None:\n raise ValueError('Return value is None')\n if not isinstance(val, str):\n raise ValueError('Return value is not string')\n if not val:\n return val\n hash_object = hashlib.sha256(val.encode('utf-8'))\n return str(hash_object.hexdigest())\n\n return _decorator", "def serialize(obj):\n return pickle.dumps(obj)", "def __hash__(self) -> int:", "def __hash__(self):\n return hash(self.base_location) ^ hash(self.fold_path) ^ hash(self.field)", "def test_serialization(self):\n for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:\n self.do_test_serialization(hashtype)", "def hash_functions(self):\n pass", "def fast_hash(infile):\n\n m = hashlib.sha256()\n with open(infile, 'rb', 1024 * 
1024) as f:\n l = f.read(1024 * 1024)\n while (len(l) > 0):\n m.update(l)\n f.seek(1024 * 1024 * (512 - 1), 1)\n l = f.read(1024 * 1024)\n return m.hexdigest()", "def __hash__(self):\n return self.to_hash()", "def hash(x) -> int:\n pass", "def sha256_encode(text):\n _hash = hashlib.sha256\n if type(text) is str:\n return _hash(text.encode('utf8')).digest()\n elif type(text) is bytes:\n return _hash(text).digest()\n elif not text:\n # Generally for calls where the payload is empty. Eg: get calls\n # Fix for AttributeError: 'NoneType' object has no attribute 'encode'\n return _hash(\"\".encode('utf8')).digest()\n else:\n return _hash(str(text).encode('utf-8')).digest()", "def __hash__(self):\n return hash(str(self.key))", "def __hash__(self):\n return hash(str(self.key))", "def internal_hash(self):\r\n return _TripleCanonicalizer(self).to_hash()", "def instance(data):\n return Fieldsha1(data)" ]
[ "0.68457127", "0.6561299", "0.6373584", "0.62677026", "0.62606", "0.603708", "0.60351026", "0.60149497", "0.5985192", "0.5962576", "0.5938484", "0.59240323", "0.58729625", "0.5839949", "0.582465", "0.5806277", "0.57929254", "0.57929254", "0.57687783", "0.5719801", "0.57143664", "0.5701857", "0.56877154", "0.568099", "0.5676611", "0.5674566", "0.5650916", "0.56447226", "0.56424695", "0.5639489", "0.56377345", "0.563105", "0.56183285", "0.5616003", "0.5608768", "0.56006324", "0.55977774", "0.5585709", "0.5579147", "0.5573625", "0.5567041", "0.5566831", "0.55651045", "0.55605376", "0.5557231", "0.5552219", "0.55481", "0.55340993", "0.55323046", "0.55199814", "0.55028105", "0.54955626", "0.54785234", "0.5463525", "0.54595655", "0.5453362", "0.5444524", "0.5440552", "0.5431557", "0.5419018", "0.5417787", "0.5413398", "0.54132384", "0.5403609", "0.5401601", "0.5372613", "0.5371141", "0.536991", "0.53693706", "0.53675264", "0.5365491", "0.5365491", "0.536448", "0.53576875", "0.53545034", "0.53496855", "0.5345804", "0.53403115", "0.5332697", "0.53157014", "0.5313689", "0.53131026", "0.5310792", "0.52964836", "0.529613", "0.52922666", "0.5290778", "0.5285533", "0.5283059", "0.5269814", "0.5259598", "0.5257272", "0.5256701", "0.52481234", "0.524291", "0.5240606", "0.5231108", "0.5230684", "0.5230684", "0.52285314", "0.5222831" ]
0.0
-1
Perform a shallow copy
def copy(self) -> 'Rune':
    return self.__copy__()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def copy(self):", "def _copy_(self):\n return copy.copy(self)", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def __copy__(self):\n return self.copy()", "def copy(self):\n from copy import deepcopy\n return deepcopy(self)", "def __copy__(self, *args, **kwargs):\n return self.copy()", "def deepcopy(self):\n return copymod.deepcopy(self)", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\r\n return copy.deepcopy(self)", "def deepcopy(self):\n return self.copy()", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy (self):\n import copy\n return copy.copy(self)", "def deepcopy(self):\n return copy.deepcopy(self)", "def copy(self):\r\n return copy.copy(self)", "def clone(self):\n return shallow_clone(self)", "def copy(self):\n return self.mutate().simple_copy()", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n \n return deepcopy(self)", "def copy(self):\n cpy = deepcopy(self)\n # usually we use copy to perform transformations on the board\n # so it's good to reset memoized values\n cpy._memoized_compact = None \n return cpy", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def __copy__(self):\n raise NotImplementedError", "def copy(self):\n return copy(self)", "def copy(self):\n return copy(self)", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def test_deepcopy(self):\n t = Identity()\n t.transform([2])\n copy.deepcopy(t)", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", "def __copy__(self):\n return self.__class__(self.m, self.n, self.data)", "def test_copy(self):\n data = [[0, 1], [1, 0]]\n b1 = Board(data)\n b2 = b1.copy()\n # test if proper copy\n self.assertListEqual(b1.data, b2.data)\n # teset 
if not just a shallow copy\n b1.data[0][0] = 1\n self.assertNotEqual(b1.data[0][0], b2.data[0][0])", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def copy(self):\n try:\n return self.__class__(self, copy=True)\n except TypeError:\n new = self.__class__(copy.deepcopy(self))\n return new", "def copy(self, deep=False):\n return _(copy.deepcopy(self._) if deep else copy.copy(self._))", "def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def copy (self):\n return self.__class__(self.name, self[:])", "def clone(self) -> Any:\n return cp.copy(self)", "def test_deepcopy(self):\n t = Precision()\n t.transform([2])\n copy.deepcopy(t)", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def copy(self):\n new = self\n return new", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n\n return deepcopy(self)", "def _shallow_clone_dataset(self: TAvalancheDataset) -> TAvalancheDataset:\n dataset_copy = copy.copy(self)\n dataset_copy._flat_data = self._flat_data._shallow_clone_dataset()\n return dataset_copy", "def clone(self):\n return copy.deepcopy(self)", "def copy(self):\n return super().copy()", "def clone(self):", "def copy(self):\n copy = self.__class__()\n copy.a = self.a\n copy.b = self.b\n copy.peak = self.peak\n copy.orientation = self.orientation\n copy.i = self.i\n copy.coords = self.coords.copy()\n return copy", "def __deepcopy__(self, memodict=None):\n return self.copy()", "def copy(self):\n return self.from_builder(self)", "def copy(self):\n return self.__class__(*self.sets)", "def test_deepcopy(self):\n t = Quantize()\n t.transform([2])\n copy.deepcopy(t)", "def copy(self):\n return self.__class__(dict(self))", "def copy (self, **kwargs):\n out = copy.deepcopy (self)\n out.update (**kwargs)\n return out", "def _shallow_clone_dataset(self: TDataWTransform) -> TDataWTransform:\n dataset_copy = copy.copy(self)\n dataset_copy._transform_groups = copy.copy(dataset_copy._transform_groups)\n dataset_copy._frozen_transform_groups = copy.copy(\n dataset_copy._frozen_transform_groups\n )\n return dataset_copy", "def copy(\n self\n ) -> AssignationProxy:\n assign = self.assignation\n copied = copy.copy(assign)\n copied.obj = copy.deepcopy(assign.obj)\n copied.range_obj = copy.deepcopy(assign.range_obj)\n copied.workshift_proxy = assign.workshift_proxy\n copied.person = assign.person\n copied.obj.id = None\n return copied", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied" ]
[ "0.8731979", "0.80809754", "0.79425305", "0.79085165", "0.7848546", "0.7804359", "0.7762183", "0.77476215", "0.7744951", "0.77417994", "0.77417994", "0.77417994", "0.77240235", "0.7720835", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76460147", "0.76444256", "0.7639214", "0.75933605", "0.7582122", "0.75679433", "0.7567484", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.7565317", "0.75107324", "0.75094426", "0.74855167", "0.74855167", "0.74855167", "0.7458669", "0.74544734", "0.74544734", "0.74355763", "0.74355763", "0.74355763", "0.74355763", "0.74276066", "0.7423587", "0.74152535", "0.7402399", "0.7389582", "0.7389582", "0.7389582", "0.7389582", "0.73889214", "0.7381127", "0.7378018", "0.7360387", "0.73501587", "0.7343114", "0.7324458", "0.7324458", "0.7311908", "0.73069614", "0.7303454", "0.7303454", "0.7303454", "0.7282917", "0.7280866", "0.72729945", "0.7262871", "0.7248735", "0.7245945", "0.7237934", "0.7230556", "0.7227782", "0.72231567", "0.7213406", "0.72034997", "0.7196256", "0.7186447", "0.7186447", "0.7186447", "0.7160906" ]
0.0
-1
sha256.sha256 doesn't implement pickle
def __deepcopy__(self, memo=None) -> 'MasterRune':
    ret = MasterRune(bytes())
    ret.restrictions = copy.deepcopy(self.restrictions)
    ret.shaobj.state = self.shaobj.state
    ret.shabase = self.shabase
    ret.seclen = self.seclen
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SHA256(self) -> _n_0_t_3[_n_0_t_9]:", "def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()", "def pickle_and_hash(obj: Any) -> str:\n try:\n s = dill.dumps(obj)\n except:\n raise UnpickleableError()\n\n return hashlib.sha512(s).hexdigest()", "def object_sha256(obj):\n\n return hashlib.sha256(json.dumps(obj).encode()).hexdigest()", "def hash(self) -> bytes:", "def default_sha256(key: KeyT, *args, **kwargs) -> bytes:\n return sha256(key).digest() # type: ignore", "def sha256(self):\n return self._sha256", "def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()", "def sha256(self):\n return sha256file(self.abspath)", "def create_hash(*args):\n challenge_str = jsonpickle.encode(args)\n challenge_hash = hashlib.sha256(challenge_str.encode())\n return Bn.from_binary(challenge_hash.digest())", "def sha256(ctx, salt=\"\"):\n if ctx.data:\n salted_input_value = salt + \":\" + ctx.data\n ctx.data = hashlib.sha256(salted_input_value.encode()).hexdigest()\n else:\n raise RefError(\n \"Ref error: eval_func: nothing to sha256 hash; try \" \"something like '|random:str|sha256'\"\n )", "def hashable(obj):\n return bytes(str(obj), \"utf-8\")", "def sha256(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)\n d.update(data)\n return d.digest()", "def sha256(self):\n return self.sha256checksums()", "def sha256(value):\n return hashlib.sha256(value).hexdigest()", "def hash(self) -> str:\r\n ...", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def get_data_hash(args):\n pass", "def Sha256(data: Union[bytes, str]) -> bytes:\n return hashlib.sha256(AlgoUtils.Encode(data)).digest()", "def sha256(message: bytes):\n # convert message bitarray\n bit_msg = bitarray(endian='big')\n bit_msg.frombytes(message)\n L = len(bit_msg)\n\n # additions done mod 2^32\n pow2 = pow(2,32)\n\n # append 1 followed by K 0s where K is the minimum number >= 0 such that \n # len(bit_msg) + 1 + K + 64 is a multiple of 512\n bit_msg = bit_msg + bitarray('1') + (bitarray('0') * ((-L-65) % 512))\n # append len(bit_msg) as a 64-bit int to bit_msg\n bit_msg = bit_msg + util.int2ba(L, length=64, endian='big')\n\n # initialize hash to predefined values\n current_hash = [h for h in initial_hash]\n\n # operate on each 512-bit chunk\n for chunk_index in range(len(bit_msg)//512):\n chunk = bit_msg[chunk_index * 512 : (chunk_index+1) * 512]\n # w is array of 64 32-bit words with first 16 equal to chunk\n w = [chunk[i*32 : (i+1)*32] for i in range(16)]\n w.extend([bitarray(32) for _ in range(48)])\n # create last 48 words in w from first 16\n for i in range(16, 64):\n s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ rightshift(w[i-15], 3)\n s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ rightshift(w[i-2], 10)\n w[i] = int2ba32(sum(map(util.ba2int, [w[i-16], s0, w[i-7], s1])) % pow2)\n\n # copy current hash (stored in hex) into working list v as bitarrays\n v = list(map(int2ba32, current_hash))\n # compression\n for i in range(64):\n S1 = rightrotate(v[4], 6) ^ rightrotate(v[4], 11) ^ rightrotate(v[4], 25)\n ch = (v[4] & v[5]) ^ ((~v[4]) & v[6])\n temp1 = (constants[i] + sum(map(util.ba2int, [v[7], S1, ch, w[i]]))) % pow2\n S0 = rightrotate(v[0], 2) ^ rightrotate(v[0], 13) ^ rightrotate(v[0], 22)\n maj = (v[0] & v[1]) ^ (v[0] & v[2]) ^ (v[1] 
& v[2])\n temp2 = (util.ba2int(S0) + util.ba2int(maj)) % pow2\n\n # shift elements of v by 1\n for j in reversed(range(1, len(v))):\n v[j] = v[j-1]\n v[0] = int2ba32((temp1 + temp2) % pow2)\n v[4] = int2ba32((util.ba2int(v[4]) + temp1) % pow2)\n\n # add compressed values (which are bitarrays) to current_hash (which are ints)\n current_hash = list(map(lambda a,b: (a + util.ba2int(b)) % pow2, current_hash, v))\n\n # each entry of current_hash is a 32-bit integer so convert to 4 bytes \n # adding bytes appends them\n return b''.join(x.to_bytes(4, 'big') for x in current_hash)", "def sha256(content):\n content = content.encode('utf-8')\n return hashlib.sha256(content).hexdigest()", "def sha3_256(x):\n return hashlib.sha3_256(x).digest()", "def _Hash(content: bytes) -> str:\n return hashlib.sha256(content).hexdigest()", "def deep_hash(obj):\n pass", "def hash_key(self):", "def _sha256(sha256):\n if not sha256:\n sha256 = \"0\" * 64\n\n return sha256", "def hash_block(self):\n # TODO : Refactor the algorithm and improve it. This method only does basic things\n block_string = pickle.dumps(self)\n block_hash = hashlib.sha3_256(block_string).digest()\n # The above lines converts the object into __str__() representation and hashes it using sha3_256 algorithm.\n return block_hash", "def test_hash_sha256(self):\n block = self.blockchain.new_block(self.proof, self.previous_hash)\n hash_ = self.blockchain.hash(block)\n\n self.assertIsInstance(hash_, str)\n self.assertEqual(hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest(), hash_)", "def sha3_256(data=None):\n return SpongeHash(512, 256, data, \"SHA3-256\", KeccakSponge, PAD_SHA3)", "def __hash__(self):\n return hash(tuple(self.sig))", "def pickle(obj):\n return pickletools.optimize(cPickle.dumps(obj))", "def __hash__(self):\n return hash(self.hash)", "def hash(self):\n raise NotImplementedError() # To be subclassed", "def get_hash(self):\r\n return", "def get_hash(self):\n return self.__hash", "def sha256(self, sha256):\n\n self._sha256 = sha256", "def regist_hash(cobj, hash, handler, dir):\n pass", "def sha256(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha256\")", "def sha256(s: str) -> str:\n return hashlib.sha256(s.encode()).hexdigest()", "def hashcode(o):", "def __Hash(self):\n return self._Hash()", "def hash_sbox(f):\n hf = sha256()\n for x in f:\n hf.update(hex(x).encode('utf-8'))\n return hf.hexdigest()", "def current_hash(self):", "def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()", "def generate_sha256_hash(fpath, sig_key=None):\n return run(fpath, sig_key)", "def soft_hash(p):\n return tuple(map(r_soft_hash, p))", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def __init__(self, seed=None):\n self.seed(seed)\n self.hashfun = \"SHA-256\"\n self._basehash()", "def sha256(cls, value):\n assert type(value) is str\n return int(sha256(value.encode()).hexdigest(), 16)", "def HashAlgorithm(self) -> _n_7_t_0:", "def test__pickle_unpickle(self):\n pass", "def get_partial_sha256(self, nbytes):\n return sha256file(abspath=self.abspath, nbytes=nbytes)", "def existing_hash(self, id):\r\n return self._read_sha_by_id(id)", "def sha256(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sha256\")", "def get_results_hash(self, data):\n data = json.dumps(data, sort_keys=True)\n result = hashlib.sha512(data.encode())\n result_hash = result.hexdigest()\n return result_hash", "def hash(self) -> types.UInt256:\n with 
serialization.BinaryWriter() as bw:\n bw.write_uint32(settings.network.magic)\n self.serialize_unsigned(bw)\n data_to_hash = bytearray(bw._stream.getvalue())\n data = hashlib.sha256(hashlib.sha256(data_to_hash).digest()).digest()\n return types.UInt256(data=data)", "def stop_hash(cobj, hash):\n pass", "def get_file_sha256(fname):\n with open(fname, 'rb') as afile:\n return base64.b64encode(get_file_hash(afile, hashlib.sha256()))", "def test_default_sha256_bytes(self):\n this_is_a_test = [\n 10244166640140130606,\n 5650905005272240665,\n 14215057275609328422,\n 5952353080197385534,\n 4990779931033217093,\n ]\n this_is_also = [\n 4140421647067018332,\n 9306548247555387104,\n 5672713771950536751,\n 8501641957786831066,\n 15146689942378126332,\n ]\n hashes = default_sha256(b\"this is a test\", 5)\n self.assertEqual(hashes, this_is_a_test)\n hashes = default_sha256(b\"this is also a test\", 5)\n self.assertEqual(hashes, this_is_also)", "def sha256(path: Union[Path, str]) -> str:\n # The file shouldn't be too big to load into memory, so be lazy.\n with open(path, 'rb') as fp:\n data = fp.read()\n m = hashlib.sha256()\n m.update(data)\n return m.hexdigest()", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def __hash__( self ):\n return hash( self.data )", "def sha_init(self):\n pass", "def GetHashKey(self, key):\r\n data = pickle.dumps(key)\r\n hashObject = hashlib.sha1(data)\r\n hashValue = hashObject.hexdigest()\r\n value = int(hashValue, 16)\r\n return value", "def __hash__(self):\n hash(self.components)", "def __hash__(self):\r\n return hash(f'{self.job_id},{self.job_size},{self.priority}')", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def _fix_sha256(self):\n logger.info('fix package hashes: started')\n\n self._incomplete_entries = [entry for key, entry in self.walk() if entry.hash is None]\n\n physical_keys = []\n sizes = []\n for entry in self._incomplete_entries:\n physical_keys.append(entry.physical_key)\n sizes.append(entry.size)\n\n results = calculate_sha256(physical_keys, sizes)\n exc = None\n for entry, obj_hash in zip(self._incomplete_entries, results):\n if isinstance(obj_hash, Exception):\n exc = obj_hash\n else:\n entry.hash = dict(type='SHA256', value=obj_hash)\n if exc:\n incomplete_manifest_path = self._dump_manifest_to_scratch()\n msg = \"Unable to reach S3 for some hash values. 
Incomplete manifest saved to {path}.\"\n raise PackageException(msg.format(path=incomplete_manifest_path)) from exc\n\n logger.info('fix package hashes: finished')", "def get_hash(self):\n return freeze_dict(self.get_hash_params())", "def __hash__(self) -> int:\n ...", "def __hash__(self) -> int:\n ...", "def hashkey(obj, salt=0):\n if isinstance(obj, str):\n return zlib.adler32(obj.encode(), salt) & 0xffffffff\n elif isinstance(obj, bytes):\n return zlib.adler32(obj, salt) & 0xffffffff\n elif isinstance(obj, datetime_type):\n return zlib.adler32(str(obj).encode(), salt) & 0xffffffff\n return hash(obj) & 0xffffffff", "def __hash__(self):\n raise NotImplementedError", "def hash_of_file(path):\n with open(path, 'rb') as archive:\n sha = sha256()\n while True:\n data = archive.read(2 ** 20)\n if not data:\n break\n sha.update(data)\n return encoded_hash(sha)", "def get_sha256_hash(key, size=None):\n partition_hash = hashlib.sha256()\n for part in key:\n partition_hash.update(str(part).encode('utf-8'))\n sha256_hash = partition_hash.hexdigest()\n if not size or size > len(sha256_hash):\n size = len(sha256_hash)\n return sha256_hash[:size]", "def test_can_pickle(self):\n settings = UploadSettings(None, FakeDataServiceApi(), None, ProjectNameOrId.create_from_name('mouse'), None)\n params = ('one', 'two', 'three')\n context = UploadContext(settings, params, multiprocessing.Manager().Queue(), 12)\n pickle.dumps(context)", "def decode_minhash(buf: str) -> LeanMinHash:\n return pickle.loads(base64.b64decode(buf.encode(\"utf-8\")))", "def b32hash(s):\n _hash = hashlib.sha256()\n _hash.update(str2bytes(s))\n return bytes2str(b32encode(_hash.digest()))", "def __reduce_ex__(self, protocol):\n return (_safe_pickle_load, (self.__module__, self.__class__.__name__, self.name))", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def hash_file_sha256(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.sha256, binary=binary, buffer_size=buffer_size)", "def get_sha256_file(filename):\n BLOCKSIZE = 65536\n hasher = hashlib.sha256()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return hasher.hexdigest()", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def shake256(data=None, digest_size=512):\n return SpongeHash(512, digest_size, data, \"SHAKE256\", KeccakSponge, PAD_SHAKE)", "def hash(password):\n return sha256_crypt.encrypt(password)", "def hash256_result(func):\n\n @wraps(func)\n def _decorator(*args, **kwargs):\n val = func(*args, **kwargs)\n if val is None:\n raise ValueError('Return value is None')\n if not isinstance(val, str):\n raise ValueError('Return value is not string')\n if not val:\n return val\n hash_object = hashlib.sha256(val.encode('utf-8'))\n return str(hash_object.hexdigest())\n\n return _decorator", "def serialize(obj):\n return pickle.dumps(obj)", "def __hash__(self) -> int:", "def __hash__(self):\n return hash(self.base_location) ^ hash(self.fold_path) ^ hash(self.field)", "def test_serialization(self):\n for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:\n self.do_test_serialization(hashtype)", "def hash_functions(self):\n pass", "def fast_hash(infile):\n\n m = hashlib.sha256()\n with open(infile, 'rb', 1024 * 
1024) as f:\n l = f.read(1024 * 1024)\n while (len(l) > 0):\n m.update(l)\n f.seek(1024 * 1024 * (512 - 1), 1)\n l = f.read(1024 * 1024)\n return m.hexdigest()", "def __hash__(self):\n return self.to_hash()", "def hash(x) -> int:\n pass", "def sha256_encode(text):\n _hash = hashlib.sha256\n if type(text) is str:\n return _hash(text.encode('utf8')).digest()\n elif type(text) is bytes:\n return _hash(text).digest()\n elif not text:\n # Generally for calls where the payload is empty. Eg: get calls\n # Fix for AttributeError: 'NoneType' object has no attribute 'encode'\n return _hash(\"\".encode('utf8')).digest()\n else:\n return _hash(str(text).encode('utf-8')).digest()", "def __hash__(self):\n return hash(str(self.key))", "def __hash__(self):\n return hash(str(self.key))", "def internal_hash(self):\r\n return _TripleCanonicalizer(self).to_hash()", "def instance(data):\n return Fieldsha1(data)" ]
[ "0.68457127", "0.6561299", "0.6373584", "0.62677026", "0.62606", "0.603708", "0.60351026", "0.60149497", "0.5985192", "0.5962576", "0.5938484", "0.59240323", "0.58729625", "0.5839949", "0.582465", "0.5806277", "0.57929254", "0.57929254", "0.57687783", "0.5719801", "0.57143664", "0.5701857", "0.56877154", "0.568099", "0.5676611", "0.5674566", "0.5650916", "0.56447226", "0.56424695", "0.5639489", "0.56377345", "0.563105", "0.56183285", "0.5616003", "0.5608768", "0.56006324", "0.55977774", "0.5585709", "0.5579147", "0.5573625", "0.5567041", "0.5566831", "0.55651045", "0.55605376", "0.5557231", "0.5552219", "0.55481", "0.55340993", "0.55323046", "0.55199814", "0.55028105", "0.54955626", "0.54785234", "0.5463525", "0.54595655", "0.5453362", "0.5444524", "0.5440552", "0.5431557", "0.5419018", "0.5417787", "0.5413398", "0.54132384", "0.5403609", "0.5401601", "0.5372613", "0.5371141", "0.536991", "0.53693706", "0.53675264", "0.5365491", "0.5365491", "0.536448", "0.53576875", "0.53545034", "0.53496855", "0.5345804", "0.53403115", "0.5332697", "0.53157014", "0.5313689", "0.53131026", "0.5310792", "0.52964836", "0.529613", "0.52922666", "0.5290778", "0.5285533", "0.5283059", "0.5269814", "0.5259598", "0.5257272", "0.5256701", "0.52481234", "0.524291", "0.5240606", "0.5231108", "0.5230684", "0.5230684", "0.52285314", "0.5222831" ]
0.0
-1
This is faster than adding the restrictions one-by-one and checking the final authcode (but equivalent)
def is_rune_authorized(self, other: Rune) -> bool:
    # Make copy, as we're going to update state.
    sha = self.shabase.copy()
    totlen = self.seclen
    for r in other.restrictions:
        pad = end_shastream(totlen)
        sha.update(pad)
        totlen += len(pad)
        enc = bytes(r.encode(), encoding='utf8')
        sha.update(enc)
        totlen += len(enc)
    return other.authcode() == sha.digest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def check_auth():", "def verify_auth_code(self, code):\n raise NotImplementedError(\n \"\"\"\n verify_scope must be implemented by a child class\n \"\"\"\n )", "def auth_code_handler(self, request, pk=None):\n try:\n # Get xero auth access information form xero connection\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n\n\n if len(stored_values) == 0:\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n secret_keys = Utils.get_access_keys(pk)\n if AccountingConfiguration.PRIVATE == secret_keys.type:\n exists = AccountingOauth2.objects.filter(company=pk).first()\n if not exists:\n auth = AccountingOauth2(accessToken=stored_values['consumer_key'],\n accessSecretKey=stored_values['rsa_key'],\n company_id=pk)\n auth.save()\n else:\n exists.accessToken = stored_values['consumer_key']\n exists.accessSecretKey = stored_values['rsa_key']\n exists.save()\n else:\n auth_verifier_uri = settings.XERO_AUTH_VERIFIER_URI\n oauth_verifier = request.GET.get('oauth_verifier')\n credentials = Utils.get_xero_public_credentials(stored_values)\n\n if credentials.expired():\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n # Verify the auth verifier for establish the connection\n\n credentials.verify(oauth_verifier)\n # Resave our verified credentials\n for key, value in credentials.state.items():\n OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value})\n\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n exists = AccountingOauth2.objects.filter(company=pk).first()\n\n if exists:\n exists.accessToken = stored_values['oauth_token']\n exists.realmId = oauth_verifier\n exists.accessSecretKey = stored_values['oauth_token_secret']\n exists.tokenAcitvatedOn = stored_values['oauth_expires_at']\n exists.tokenExpiryON = stored_values['oauth_authorization_expires_at']\n exists.save()\n else:\n auth = AccountingOauth2(accessToken=stored_values['oauth_token'],\n refreshToken='',\n realmId=oauth_verifier,\n accessSecretKey=stored_values['oauth_token_secret'],\n tokenAcitvatedOn=stored_values['oauth_expires_at'],\n tokenExpiryON=stored_values['oauth_authorization_expires_at'],\n company_id=pk)\n auth.save()\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL',\n # 'http://localhost:4200/coa-match/quickbooks')\n\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL','http://ec2-52-207-28-114.compute-1.amazonaws.com/ix/coa-match/quickbooks')\n\n # return redirect(auth_redirect_url)\n\n except Exception as e:\n auth_cancel_url = settings.QBO_AUTH_CANCEL_URL\n Utils.send_company_misconfig(pk, e)\n return redirect(auth_cancel_url + '/error')\n #return Utils.dispatch_success(request, 'TOKEN_ALREADY_VALIDATED')\n\n auth_redirect_url = 
settings.XERO_AUTH_REDIRECT_URL\n return redirect(auth_redirect_url)\n # return Utils.dispatch_success(request, stored_values)", "def checkCode():\n code = {'code': request.json['code'], 'phone': request.json['phone']}\n # if code and phone exist authorize user\n user = models.User.query.filter_by(code=code['code'], phone=code['phone']).first()\n value = \"User and SMS code don't exist\"\n if (user):\n user.is_verified = True\n value = \"Code exists!\"\n db.session.commit()\n resp = Response(json.dumps(value), status=200, mimetype='application/json')\n return resp", "def authn_and_authz():\n authentication()\n authorization()", "def check_authcode_params(self, request: HttpRequest, params: Iterable[str]):\n is_valid = True\n auth_code_calculation_values = [\n request.GET[param_name]\n for param_name in params\n if param_name in request.GET\n ]\n correct_auth_code = self.calculate_auth_code(\n \"|\".join(auth_code_calculation_values)\n )\n auth_code = request.GET[\"AUTHCODE\"]\n if not hmac.compare_digest(auth_code, correct_auth_code):\n logger.warning('Incorrect auth code \"{}\".'.format(auth_code))\n is_valid = False\n return is_valid", "def auth_check(phenny, nick, target=None):\n global auth_list\n if target == phenny.config.nick:\n return 0\n elif nick in auth_list:\n return 1", "def func_auth(self, data):\n check = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n if check == 'auth login':\n auth_id = library.q_id_generate(size=12)\n message = '334 ' + auth_id\n self.func_sender(message)\n self.request.recv(self.std_recv_size)\n auth_id_two = library.q_id_generate(size=12)\n message_two = '334 ' + auth_id_two\n self.func_sender(message_two)\n self.request.recv(self.std_recv_size)\n message_three = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message_three)\n return True", "def authenticator():", "def auth_verify(phenny, input):\n global auth_list\n nick = input.group(1)\n level = input.group(3)\n if input.nick != 'NickServ':\n return\n elif level == '3':\n if nick in auth_list:\n return\n else:\n auth_list.append(nick)\n else:\n if nick not in auth_list:\n return\n else:\n auth_list.remove(nick)", "async def submit_auth_code(self, code, make_permanent=True):\n if not code.isdecimal() or len(code) != 6:\n _LOGGER.error(\"2FA code must be 6 digits\")\n return False\n _LOGGER.info(\"Validating 2FA response\")\n post_data = {\n \"deviceId\": self._device_id,\n \"deviceName\": self._device_name,\n \"verificationCode\": code,\n }\n if make_permanent:\n post_data[\"rememberDevice\"] = \"on\"\n\n js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)\n if js_resp:\n _LOGGER.debug(pprint.pformat(js_resp))\n if js_resp[\"success\"]:\n _LOGGER.info(\"Device successfully authorized\")\n while not self._registered:\n # Device registration does not always immediately take effect\n await asyncio.sleep(3)\n await self._authenticate()\n # Current server side vin context is ambiguous (even for single vehicle account??)\n self._current_vin = None\n return True", "def auth_token(self):", "def url_contains_auth_code(url: str) -> bool:\n return url.count(\"code=\") == 1", "def _get_auth_string(self):", "def check_new_payment_authcode(self, request: HttpRequest):\n return self.check_authcode_params(\n request,\n (\n \"RETURN_CODE\",\n \"ORDER_NUMBER\",\n \"SETTLED\",\n \"CONTACT_ID\",\n \"INCIDENT_ID\",\n ),\n )", "def checkUuidAuth(uuid, auth_mode, user_token=None, return_doc=False, is_log_route=False) :\n\n print (\". 
\"*50)\n log_app.debug(\"checkUuidAuth / uuid : %s\", uuid )\n\n if is_log_route == False : \n\n ### get uuid auth document\n uuidsAuthColl = mongoConfigColls['uuids_auth']\n uuid_auth_doc_raw = uuidsAuthColl.find_one( {'apiviz_front_uuid' : uuid} )\n\n if uuid_auth_doc_raw : \n\n uuid_auth_doc = DocOidToString(uuid_auth_doc_raw)\n log_app.debug(\"checkUuidAuth / uuid_auth_doc : \\n%s\", pformat(uuid_auth_doc) )\n\n ### main condition : is uuid authorized or locked / blocked\n uuid_is_authorized = uuid_auth_doc['uuid_is_authorized']\n\n if return_doc :\n\n keys_not_in_trimmed_doc = ['added_by', 'logs', 'auth_role_users', '_id']\n trimmed_uuid_auth_doc = { key : val for key, val in uuid_auth_doc.items() if key not in keys_not_in_trimmed_doc }\n \n if user_token : \n ### return the whole uuid_auth document depending on user auth\n user_is_listed = isUserListed(user_token, uuid, auth_mode, uuid_auth_doc)\n\n if user_is_listed : \n return uuid_auth_doc\n\n else :\n return trimmed_uuid_auth_doc\n\n else : \n ### return the trimmed uuid_auth document for anonymous users\n return trimmed_uuid_auth_doc\n\n ### more checks if uuid is private\n # role_check = uuid_auth_doc['apiviz_options']['private_instance']\n role_check = uuid_auth_doc['private_instance']\n # log_app.debug(\"checkUuidAuth / role_check : %s\", role_check )\n\n if role_check and not return_doc :\n ### more checks if uuid is private\n\n # get checkJWT response\n user_is_listed = isUserListed(user_token, uuid, auth_mode, uuid_auth_doc)\n\n ### return a boolean\n return user_is_listed\n\n else : \n ### return a boolean\n return uuid_is_authorized\n\n else : \n ### return a boolean\n return {\n \"_id\" : None,\n \"apiviz_front_uuid\" : None,\n }\n\n else :\n return True", "def is_private(code):\n return 4000 <= code <= 4999", "def authenticate_user(authentication_code):\n\n for suffix in ('', '=', '=='):\n attempt = authentication_code + suffix\n decoded = base64.decodestring(attempt)\n fields = decoded.split('_')\n\n email, user_id, time_stamp, str_hex = fields\n\n if time_stamp < time.time():\n # Authentication Code Expired\n raise seerpod_exceptions.AuthenticationCodeExpired('Authentication code expired',\n response_data=authentication_code)\n user = None #business_contact_api.BusinessContacts().get_user_detail_from_email(email)\n\n if not user:\n continue\n\n if attempt == generate_authentication_code(\n user.id, time_stamp, user.owner_email_id, user.password):\n return user\n\n # Invalid authentication code\n raise seerpod_exceptions.InvalidAuthenticationCode('Invalid Authentication code',\n response_data=authentication_code)", "def verifyToken():\n if request:\n data = json.dumps(request.json)\n reqToken = json.loads(data)[\"token\"]\n if len(reqToken) >= 8 and len(reqToken) <= 32:\n found = Token.query.filter(Token.token == f'{reqToken}').first()\n print(found)\n if found:\n message = \"Success! 
It's an older code, sir, but it checks out.\" # noqa\n else:\n message = \"Code not found.\"\n else:\n message = 'Invalid token length.'\n else:\n message = 'Invalid JSON request'\n return jsonify(status=message)", "def access_valid_token(token_code):\n token_code = remove_unicode(Bytes.for_string_or_unicode(token_code).as_encoded_str())\n\n prefix = token_code[:TOKEN_NAME_PREFIX_LENGTH]\n if len(prefix) != TOKEN_NAME_PREFIX_LENGTH:\n return None\n\n suffix = token_code[TOKEN_NAME_PREFIX_LENGTH:]\n\n # Lookup the token by its prefix.\n try:\n token = (\n AppSpecificAuthToken.select(AppSpecificAuthToken, User)\n .join(User)\n .where(\n AppSpecificAuthToken.token_name == prefix,\n (\n (AppSpecificAuthToken.expiration > datetime.now())\n | (AppSpecificAuthToken.expiration >> None)\n ),\n )\n .get()\n )\n\n if not token.token_secret.matches(suffix):\n return None\n\n assert len(prefix) == TOKEN_NAME_PREFIX_LENGTH\n assert len(suffix) >= MINIMUM_TOKEN_SUFFIX_LENGTH\n update_last_accessed(token)\n return token\n except AppSpecificAuthToken.DoesNotExist:\n pass\n\n return None", "def _check_authorization(cls, authzr, identifier):\n if authzr.body.identifier != identifier:\n raise errors.UnexpectedUpdate(authzr)\n return authzr", "def test_replace_o_auth_client_authorization(self):\n pass", "def authenticate(credentials):", "def getauth_from_db(args):\n global logger\n global tool_names\n\n if (args['api_key'] != api_key):\n logger.info('api_key invalid')\n return auth_respond(1,'api_key invalid','')\n \n if args['card_id'] in all_access_card_ids_list:\n # found in all_access_card_ids_list. Grant access\n logger.info('granted: all_access_card')\n return auth_respond(0,'granted: all_access_card','')\n else:\n #\n # search db for card id\n #\n user_db_parsed = get_user_db()\n #sys.stderr.write(pprint.pformat(user_db_parsed) + '\\n')\n found_card_id = found_tool_auth = False \n for u in user_db_parsed:\n (db_card_id,db_user_name,db_tool_auths) = u\n if (db_card_id == args['card_id']):\n #\n # if we found the card id\n #\n found_card_id = True\n db_tool_auth_list = db_tool_auths.split(':')\n\n for db_tool in db_tool_auth_list:\n if db_tool == args['tool_id']:\n found_tool_auth = True\n #\n # and they are authorized\n #\n logger.info('getauth_from_db: found record. ' + \n ' db_card_id:' + db_card_id +\n ', user_name:' + db_user_name + \n ', tool_auths:' + db_tool_auths\n )\n #\n # then grant access\n #\n return auth_respond(0,'granted',db_user_name,db_tool_auths)\n\n if (not found_card_id):\n return auth_respond(1,'denied: unknown card','')\n if (not found_tool_auth):\n return auth_respond(1,'denied: not authorized','')\n\n return auth_respond(1, 'error: badly-formed request' )", "def SecondPart():\n return passwordChecker(data)", "def oauth2_process_code(self, request, redirect_uri):\n if 'code' in request.GET:\n # We've got a code from an authorisation, so convert it to a access_token\n\n self.oauth2_access_token(request.GET['code'], next=redirect_uri)\n\n request.session['oauth2_token'] = self.oauth2_token\n request.session['oauth2_token_expires'] = self.oauth2_token_expires\n\n return True\n # else: 'error_reason' in request.GET\n \n return False", "def _verify_token(self, token, request):\n # First check if this request was already verified.\n # `request.bound_data` is an attribute provided by Kinto to store\n # some data that is shared among sub-requests (e.g. 
default bucket\n # or batch requests)\n if REIFY_KEY not in request.bound_data:\n user_id = None\n client_name = None\n auth_client = self._get_auth_client(request)\n\n for scope, client in request.registry._fxa_oauth_scope_routing.items():\n try:\n profile = auth_client.verify_token(token=token, scope=aslist(scope))\n user_id = profile['user']\n scope = profile['scope']\n client_name = client\n\n # Make sure the bearer token scopes don't match multiple configs.\n routing_scopes = request.registry._fxa_oauth_scope_routing\n intersecting_scopes = [x for x in routing_scopes.keys()\n if x and set(x.split()).issubset(set(scope))]\n if len(intersecting_scopes) > 1:\n logger.warn(\"Invalid FxA token: {} matches multiple config\" % scope)\n return None, None\n\n break\n except fxa_errors.OutOfProtocolError:\n logger.exception(\"Protocol error\")\n raise httpexceptions.HTTPServiceUnavailable()\n except (fxa_errors.InProtocolError, fxa_errors.TrustError) as e:\n logger.debug(\"Invalid FxA token: %s\" % e)\n\n # Save for next call.\n request.bound_data[REIFY_KEY] = (user_id, client_name)\n\n return request.bound_data[REIFY_KEY]", "def authenticate(self, rfid):\n print(\"Auth id: [{}]\".format(rfid))\n\n values = {'id' : rfid}\n data = urllib.parse.urlencode(values)\n data = data.encode('utf-8')\n\n t1 = perf_counter()\n\n req = urllib.request.Request(self.auth_url, data)\n try:\n resp = urllib.request.urlopen(req, timeout=self.request_timeout)\n except URLError as err:\n print(\"URLError: auth_url:[{}]\".format(self.auth_url))\n print(\"URLError: {}\".format(err))\n print(\"Falling back to local cache\")\n cached = self.auth_from_cache(rfid)\n return cached\n except timeout as err:\n cached = self.auth_from_cache(rfid)\n return cached\n\n text = resp.read()\n\n t2 = perf_counter()\n print(\"Auth got [{}] in {} seconds\".format(text, t2-t1))\n\n if text == b'Granted':\n return True", "def take_auth(aid):\r\n auth_passwd = request.values.get('auth_passwd', '')\r\n with engine.with_session() as ss:\r\n cur_auth = ss.query(LxContractAuthorization).get(aid)\r\n if not cur_auth:\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'AUTH_NOT_EXISTS']})\r\n if not sha256_crypt.verify(auth_passwd, cur_auth.auth_passwd):\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'AUTH_PASSWD_ERROR']})\r\n update_dict = dict()\r\n update_dict['user'] = current_user\r\n cur_auth.update(update_dict)\r\n return jsonify({'success': True, 'data': cur_auth.contract_id})", "def verifier(self,code):\n \n client = oauth.Client(self.consumer)\n resp, content = client.request(self.access_token_url, \"POST\")\n if resp['status'] != '200':\n print resp\n raise FBError(\"Invalid response %s.\" % resp['status'])\n access_token = dict(urlparse.parse_qsl(content))\n self._access_token = access_token", "def checkJWT(token, roles_to_check, uuid=\"\", auth_mode=None, return_resp=False):\n\n print (\". 
\"*50)\n\n # is_authorized = True\n\n ### set the collection to user\n mongoColl = mongoConfigColls['endpoints']\n log_app.debug(\"checkJWT / auth_mode : %s\", auth_mode )\n log_app.debug(\"checkJWT / roles_to_check : %s\", roles_to_check )\n\n if auth_mode and uuid != \"\" : \n\n if 'all' not in roles_to_check :\n\n ### retrieving the root_url for authentication in general given the AUTH_MODE\n root_auth_doc = mongoColl.find_one({'apiviz_front_uuid': uuid, 'field' : 'app_data_API_root_auth'})\n # log_app.debug(\"checkJWT / root_auth_doc : \\n%s\", pformat(root_auth_doc) )\n\n auth_url = root_auth_doc['root_url'][auth_mode]\n log_app.debug( \"checkJWT / auth_url : %s\", pformat(auth_url) )\n\n ### retrieving the root_url and args for authentication\n confirm_auth_doc = mongoColl.find_one({'apiviz_front_uuid': uuid, 'field' : 'app_data_API_user_auth'})\n confirm_rooturl = confirm_auth_doc['root_url']\n confirm_user_role_path = confirm_auth_doc['resp_fields']['user_role']['path']\n log_app.debug( \"checkJWT / confirm_user_role_path : %s\", confirm_user_role_path) \n\n confirm_basestring = auth_url + confirm_rooturl\n # log_app.debug( \"checkJWT / confirm_basestring : %s\", pformat(confirm_basestring) )\n \n confirm_options = confirm_auth_doc['args_options']\n confirm_token_arg = ''\n for arg in confirm_options : \n if arg['app_arg'] == 'authToken' : \n confirm_arg = '?{}={}'.format(arg['arg'], token)\n \n confirm_url = confirm_basestring + confirm_arg\n # log_app.debug( \"checkJWT / confirm_url : %s\", pformat(confirm_url) )\n\n ### send request to service and read response\n auth_response = requests.get(confirm_url)\n auth_response_status = auth_response.status_code\n log_app.debug( \"checkJWT / auth_response_status : %s\", auth_response_status )\n auth_response_data = auth_response.json()\n # log_app.debug( \"checkJWT / auth_response_data : \\n%s\", pformat(auth_response_data) )\n\n print (\". 
\"*50)\n\n if return_resp : \n # return full auth response\n return {\n 'auth_response_data' : auth_response_data,\n 'auth_response_status' : auth_response_status,\n 'confirm_auth_doc' : confirm_auth_doc,\n }\n\n else :\n ### get role to check value in response\n auth_response_user_role = getValueFromDictAndPathString(auth_response_data, confirm_user_role_path)\n log_app.debug( \"checkJWT / auth_response_user_role : %s\", auth_response_user_role) \n # return is_authorized\n return auth_response_user_role in roles_to_check\n \n else : \n return True\n\n else : \n return False", "def get_authenticated_denied(self):", "def payload_add_auth_code(self, payload: dict):\n data = \"{}|{}\".format(payload[\"api_key\"], payload[\"order_number\"])\n payload.update(authcode=self.calculate_auth_code(data))", "def obtainAccessTokenBy3LeggedOAuth(self, auth_code):\r\n header = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}\r\n url = self._config['OAUTH2ENDPOINT']['huddleAccessTokenServer']\r\n\r\n body = {\"grant_type\": \"authorization_code\",\r\n \"client_id\": self._config['OAUTH2']['clientID'],\r\n \"redirect_uri\": self._config['OAUTH2']['redirectUri'],\r\n \"code\": auth_code}\r\n\r\n return self._adapter.postRequest(url, header, parse.urlencode(body))", "def test_replace_o_auth_authorize_token(self):\n pass", "def auth_contract(cid):\r\n read_perm = request.values.get('read_perm', 0, type=int)\r\n write_perm = request.values.get('write_perm', 0, type=int)\r\n sign_perm = request.values.get('sign_perm', 0, type=int)\r\n expire_days = request.values.get('expire_days', 0, type=int)\r\n sub_user_id = request.values.get('sub_user_id', 0, type=int)\r\n long_term_auth = True\r\n gmt_expire = None\r\n if 0 < expire_days < 15:\r\n long_term_auth = False\r\n gmt_expire = datetime.datetime.now() + datetime.timedelta(expire_days)\r\n # print gmt_expire\r\n if long_term_auth and not sub_user_id:\r\n return jsonify(\r\n {'success': False,\r\n 'errorMsg': constants.ERROR_CODE['LONG_AUTH_ONLY_TO_SUB_USER']})\r\n\r\n with engine.with_session() as ss:\r\n cur_contract = ss.query(LxContract).get(cid)\r\n partners = ss.query(LxContractParticipation).filter_by(contract_id=cid)\r\n sub_user = ss.query(LxUser).get(sub_user_id)\r\n\r\n if long_term_auth and (not sub_user.parent_id == current_user.id):\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'ONLY_AUTH_TO_SUB_USER']})\r\n print cur_contract.owner.id\r\n owner_auth = (cur_contract.owner.id == current_user.id)\r\n partner_auth = False\r\n for partner in partners:\r\n if partner.user_id == current_user.id:\r\n partner_auth = True\r\n # print 'owner_auth:: ' + str(owner_auth)\r\n # print 'partner_auth:: ' + str(partner_auth)\r\n if not (owner_auth or partner_auth):\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'NO_AUTH_CUR_CONTRACT']})\r\n if partner_auth and write_perm == 1:\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'NO_AUTH_CUR_CONTRACT']})\r\n\r\n auth_passwd = [random.choice(\r\n string.digits + string.ascii_lowercase + string.ascii_uppercase\r\n ) for i in range(0, 8)]\r\n auth_passwd = ''.join(auth_passwd)\r\n auth_hash = sha256_crypt.encrypt(auth_passwd)\r\n if not long_term_auth:\r\n new_auth = LxContractAuthorization(\r\n contract=cur_contract,\r\n read_perm=read_perm,\r\n write_perm=write_perm,\r\n sign_perm=sign_perm,\r\n auth_passwd=auth_hash,\r\n gmt_expire=gmt_expire,\r\n auth_own_user=current_user\r\n )\r\n 
ss.add(new_auth)\r\n else:\r\n new_auth = LxContractAuthorization(\r\n contract=cur_contract,\r\n read_perm=read_perm,\r\n write_perm=write_perm,\r\n sign_perm=sign_perm,\r\n auth_passwd=auth_hash,\r\n user=sub_user,\r\n auth_own_user=current_user\r\n )\r\n ss.add(new_auth)\r\n re_dict = dict()\r\n re_dict['auth_passwd'] = auth_passwd\r\n auth_url = 'http://' + config.SERVER_NAME + '/api/contract/take_auth/' \\\r\n + str(new_auth.id)\r\n re_dict['auth_url'] = auth_url\r\n return jsonify({'success': True, 'data': re_dict})", "def _verfify_auth_and_profiles_data (self, data):\n if type(data.get('profiles')) == dict:\n if len(str(data.get('authURL', ''))) > 10 and len(str(data.get('authURL', ''))) < 50:\n return True\n return False", "def test_auth_code_positive(self, api):\n self.builder.add_user(api.get_user())\n resp = api.login_user(api.get_user().username, api.get_user().password)\n self.builder.del_user(api.get_user())\n assert resp.status_code == 200", "def check_pwd_policy1(processed):\n policy, letter, pwd = processed\n return pwd.count(letter) in policy", "def googleAuthByToken(self, access_token, googleAuthenticatorCode, otp, smsTemplate2FA = ''):\n payload = {'apikey': self._lr_object._get_api_key(), 'access_token':access_token, 'googleAuthenticatorCode':googleAuthenticatorCode,'otp':otp,'smsTemplate2FA':smsTemplate2FA}\n url = self._lr_object.SECURE_API_URL + authEndpoint + \"account/2FA/Verification\"\n return self._lr_object._get_json(url, payload)", "def validate_code(self, client_id, code, client, request, *args, **kwargs):\n client = client or self._clientgetter(client_id)\n log.debug('Validate code for client %r and code %r', client.client_id, code)\n grant = self._grantgetter(client_id=client.client_id, code=code)\n if not grant:\n log.debug('Grant not found.')\n return False\n if hasattr(grant, 'expires') and datetime.datetime.utcnow() > grant.expires:\n log.debug('Grant is expired.')\n return False\n\n request.state = kwargs.get('state')\n request.user = grant.user\n request.scopes = grant.scopes\n return True", "def calculate_auth_code(self, data) -> str:\n return (\n hmac.new(\n bytes(self.config.get(VENE_PAYMENTS_BAMBORA_API_SECRET), \"latin-1\"),\n msg=bytes(data, \"latin-1\"),\n digestmod=hashlib.sha256,\n )\n .hexdigest()\n .upper()\n )", "def apply_auth():\n\tclient = BaiduOpenApi()\n\tapi = client.device.code\n\tresp = client.device.code.get(response_type=\"device_code\", scope=\"netdisk\")\n\t# open grant page and wait for user confirm\n\twebbrowser.open_new_tab(r\"http://openapi.baidu.com/device?code=%s\"%resp[\"user_code\"])\n\t# yield to main\n\tyield\n\t# main will tell user to confirm and it will take a while\n\t# polling to wait server back\n\tpolling_tokens(resp[\"device_code\"], resp[\"interval\"], resp[\"expires_in\"])", "def is_reserved(code):\n return 1000 <= code <= 2999", "def test_authenticate(self):\n a = Authenticator( self.logger )\n authentication_id = a.authenticate(\n self.patron_barcode, self.api_url_root, self.api_key, self.partnership_id, self.university_code )\n self.assertEqual(\n 27, len(authentication_id) )", "def check_auth(username, password):\n return username == 'aweber' and password == 'aweber1100'", "def auth_orcid(request):\n\n client_id = settings.ORCID_CLIENT_ID\n client_secret = settings.ORCID_CLIENT_SECRET\n redirect_uri = settings.ORCID_REDIRECT_URI\n scope = list(settings.ORCID_SCOPE.split(\",\"))\n oauth = OAuth2Session(client_id, redirect_uri=redirect_uri,\n scope=scope)\n params = request.GET.copy()\n code = 
params['code']\n\n try:\n token = oauth.fetch_token(settings.ORCID_TOKEN_URL, code=code,\n include_client_id=True, client_secret=client_secret)\n try:\n validators.validate_orcid_token(token['access_token'])\n token_valid = True\n except ValidationError:\n messages.error(request, 'Validation Error: ORCID token validation failed.')\n token_valid = False\n except InvalidGrantError:\n messages.error(request, 'Invalid Grant Error: authorization code may be expired or invalid.')\n token_valid = False\n\n if token_valid:\n orcid_profile, _ = Orcid.objects.get_or_create(user=request.user)\n orcid_profile.orcid_id = token.get('orcid')\n orcid_profile.name = token.get('name')\n orcid_profile.access_token = token.get('access_token')\n orcid_profile.refresh_token = token.get('refresh_token')\n orcid_profile.token_type = token.get('token_type')\n orcid_profile.token_scope = token.get('scope')\n orcid_profile.token_expiration = token.get('expires_at')\n orcid_profile.full_clean()\n orcid_profile.save()\n\n return redirect('edit_orcid')", "def accesscode(request, code):\n employee = Employee.objects.get(access_code=code)\n user = employee.user\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n return HttpResponseRedirect('/')", "def get_authorization():\n return True", "def test_valid_access_request(self):\n\n # Generate a valid auth token\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code'\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(200, response.status_code)\n\n # Assert that the token came back in the response\n token = response.json\n self.assertIsNotNone(token['access_token'])\n self.assertIsNotNone(token['expires_in'])\n self.assertIsNotNone(token['id_token'])\n self.assertIsNotNone(token['refresh_token'])\n self.assertIsNotNone(token['token_type'])\n self.assertEqual('Bearer', token['token_type'])\n\n # Assert that the access token is in the database\n with base.HybridSessionManager():\n access_token = \\\n token_api.access_token_get_by_token(token['access_token'])\n self.assertIsNotNone(access_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, access_token.user_id)\n self.assertEqual(token['id_token'], access_token.user_id)\n self.assertEqual(token['expires_in'], CONF.oauth.access_token_ttl)\n self.assertEqual(token['expires_in'], access_token.expires_in)\n self.assertEqual(token['access_token'], access_token.access_token)\n\n # Assert that the refresh token is in the database\n with base.HybridSessionManager():\n refresh_token = \\\n refresh_tokens.refresh_token_get_by_token(\n token['refresh_token'])\n\n self.assertIsNotNone(refresh_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, refresh_token.user_id)\n self.assertEqual(CONF.oauth.refresh_token_ttl,\n refresh_token.expires_in)\n self.assertEqual(token['refresh_token'], refresh_token.refresh_token)\n\n # Assert that the authorization code is no longer in the database.\n with base.HybridSessionManager():\n none_code = \\\n 
auth_api.authorization_code_get(authorization_code.code)\n self.assertIsNone(none_code)", "def get_authenticated_granted(self):", "def parseAuthResponse(self, code):\n oaDict = {}\n\n # Get tokens\n result = self.fetchToken(code)\n if not result['OK']:\n return result\n self.log.debug('Token RESPONSE:\\n', pprint.pformat(result['Value']))\n oaDict['Tokens'] = result['Value']\n\n # Get user profile\n result = self.getUserProfile(oaDict['Tokens']['access_token'])\n if not result['OK']:\n return result\n oaDict['UserProfile'] = result['Value']\n self.log.debug('User profile RESPONSE:\\n', pprint.pformat(result['Value']))\n\n # Get tokens\n result = self.fetchToken(refreshToken=oaDict['Tokens']['refresh_token'])\n if not result['OK']:\n return result\n oaDict['Tokens'] = result['Value']\n self.log.debug('Token RESPONSE:\\n', pprint.pformat(result['Value']))\n\n return S_OK(oaDict)", "def verify(self, phone, code, case):\n key = self.get_key(phone, case)\n tried_count = self.get_tried_count(key)\n if tried_count > self.tried_count:\n return False, 'tried too many times'\n else:\n if tried_count == 0:\n self.set_tried_count(key, 1)\n else:\n self.incr_count(key)\n saved_code = self.get_code(phone, case)\n verified = saved_code == code\n if verified:\n self.rm_code(phone, case)\n return verified, None\n else:\n return verified, '%s code verify failed' % case", "def authorized(self):\n pass", "def bad_substring_check_account(self, google_ads_account_id):\n pass", "def test_token_only_for_1_user(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertNotEqual(self.user.verify_auth_token(user_token),\n self.user2)", "def is_frozensand_auth_available(self):\n cvar = self.getCvar('auth')\n if cvar:\n auth = cvar.getInt()\n return auth != 0\n else:\n return False", "def identify_auth():\r\n from requests.auth import HTTPBasicAuth\r\n from requests.auth import HTTPDigestAuth\r\n # HTTPBasicAuth Auth Method\r\n response = requests.get(base_url + '/basic-auth/51zxw/8888', auth=HTTPBasicAuth('51zxw', '8888'))\r\n print(response.status_code)\r\n print(response.text)\r\n\r\n # HTTPDigestAuth Auth Method\r\n response = requests.get(base_url + '/digest-auth/auth/zwx/6666', auth=HTTPDigestAuth('zwx', '6666'))\r\n print(response.status_code)\r\n print(response.text)\r\n print(response.json())", "def check_pwd_policy2(processed):\n policy, letter, pwd = processed\n idx1 = policy[0] - 1\n idx2 = policy[-1] - 1\n return (pwd[idx1] == letter) ^ (pwd[idx2] == letter)", "def auth(self):\n # boundary = int(ssis_synctree_settings.get('SSIS_AUTOSEND', 'auth_boundary'))\n # ldap_auth = ssis_synctree_settings.get('SSIS_AUTOSEND', 'auth_above_equal')\n # manual_auth = ssis_synctree_settings.get('SSIS_AUTOSEND', 'auth_less_than')\n # return ldap_auth if int(self._grade) >= boundary else manual_auth\n return 'ldap_syncplus'", "def auth_user():\n global token\n app.logger.info(\"Microsoft Planner Service running on /auth port as expected\")\n try:\n request_count = 0\n if request_count == 0:\n token = get_tokens_as_app(client_id, user_code_info, tenant_id)\n request_count = 1 \n if 'access_token' in token:\n app.logger.info('Adding access token to cache...')\n add_token_to_cache(client_id, tenant_id, token)\n return_object = (f\"{token['refresh_token']}\")\n return render_template('token.html', return_object=return_object)\n else:\n return_error = (\"Token response did not result in a proper response. 
Athenticate again please.\")\n return render_template('token.html', return_error=return_error)\n except AttributeError or TypeError:\n return_error = ('Authentification failed. Please pull and restart your system and authenticate again.')\n return render_template('token.html', return_error=return_error)\n except adal.AdalError as err:\n return_error = (\"You're logged in with the wrong user. Please log out and authenticate again.\")\n return render_template('token.html', return_error=return_error)", "def generate_access_code(user_data):\n\n try:\n user = User.objects.get(cn=user_data['cn'])\n return user.code\n except User.DoesNotExist:\n user = User(cn=user_data['cn'])\n\n event_id = os.environ.get('EVENTBRITE_EVENTID')\n ticket_id = os.environ.get('EVENTBRITE_TICKETID')\n\n # now = datetime.datetime.utcnow()\n # end_time = now + datetime.timedelta(minutes=30) # valid for 30 min\n # end_time = end_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n access_code = hexlify(os.urandom(3)).decode()\n\n req = eventbrite.post('/events/' + str(event_id) + '/access_codes/', {\n 'access_code.code': access_code,\n 'access_code.ticket_ids': [ticket_id],\n 'access_code.quantity_available': 1, # can only use once\n # 'access_code.end_date': end_time\n })\n\n if 'code' in req:\n user.code = req['code']\n user.save()\n return user.code\n else:\n return None", "def auth_request(phenny, input):\n admins = phenny.config.admins\n pattern = '(' + '|'.join([re.escape(x) for x in admins]) + ')'\n matches = re.findall(pattern, input)\n for x in matches:\n phenny.msg('NickServ', 'ACC ' + x)", "def auth():\n\tcode = request.query.code\n\tauth = 'https://foursquare.com/oauth2/access_token'\n\tparams = dict(\n\t\tclient_id=CLIENT_ID,\n\t\tclient_secret=CLIENT_SECRET,\n\t\tgrant_type='authorization_code',\n\t\tredirect_uri=REDIRECT_URI,\n\t\tcode=code\n\t)\n\tauth_says = fetch('%s?%s'%(auth, urlencode(params)))\n\tauth_response = json.loads(auth_says.content)\n\tif 'access_token' in auth_response:\n\t\toauth_token=auth_response['access_token']\n\t\tresponse.set_cookie('user', oauth_token, secret=CLIENT_SECRET)\n\t\tlogging.info('new oauth_token:%s'%oauth_token)\n\t\tredirect('/')\n\telse:\n\t\tlogging.error(auth_response)\n\t\tabort()", "def authorization():\n pass", "def get_everyone_denied(self):", "def authenticate(user, request):", "def test_read_o_auth_client_authorization(self):\n pass", "def _check_auth(self, group_id):\n return", "def auth():\n pass", "def auth():\n pass", "def FirstPart(): \n return passwordChecker_incorrect(data)", "def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False", "def test_read_o_auth_authorize_token(self):\n pass", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def _verify_code(\n 
self,\n mock_add_credentials: Any,\n mock_request_user_sync: Any,\n mock_sendmail: Any,\n mock_recaptcha: Any,\n code: str = '',\n email: str = '[email protected]',\n ):\n mock_add_credentials.return_value = True\n mock_request_user_sync.return_value = True\n mock_sendmail.return_value = True\n mock_recaptcha.return_value = True\n with self.session_cookie_anon(self.browser) as client:\n with client.session_transaction():\n with self.app.test_request_context():\n # lower because we are purposefully calling it with a mixed case mail address in tests\n send_verification_mail(email.lower())\n signup_user = self.app.private_userdb.get_user_by_pending_mail_address(email)\n code = code or signup_user.pending_mail_address.verification_code\n\n return client.get('/verify-link/' + code)", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def test_wrong_pwmerchactive_public_list_2(self, _authorization):\n user1.headers['x-token'] = user1.headers['x-token'] + '1'\n user1.pwmerchactive(method='public_list', params={})\n assert user1.resp_pwmerchactive == {'code': -32034, 'message': 'EStateUnauth',\n 'data': {'field': 'token', 'reason': 'Expired or invalid', 'value': user1.headers['x-token']}}", "def get_everyone_granted(self):", "def authenticate_user(data):\n \n try:\n auth_token = data[\"auth_token\"]\n user_token = Token.objects.get(username=data[\"username\"])\n if user_token.token == auth_token:\n return True\n except:\n return False\n return False", "def pick_auth(self, areq, comparision_type=\"\"):\n if comparision_type == \"any\":\n return self.authn_broker[0]\n\n try:\n if len(self.authn_broker) == 1:\n return self.authn_broker[0]\n elif \"acr_values\" in areq:\n if not comparision_type:\n comparision_type = \"exact\"\n\n if not isinstance(areq[\"acr_values\"], list):\n areq[\"acr_values\"] = [areq[\"acr_values\"]]\n\n for acr in areq[\"acr_values\"]:\n res = self.authn_broker.pick(acr, comparision_type)\n logger.debug(\n \"Picked AuthN broker for ACR %s: %s\" % (str(acr), str(res))\n )\n if res:\n # Return the best guess by pick.\n return res[0]\n else: # same as any\n try:\n acrs = areq[\"claims\"][\"id_token\"][\"acr\"][\"values\"]\n except KeyError:\n return self.authn_broker[0]\n else:\n for acr in acrs:\n res = self.authn_broker.pick(acr, comparision_type)\n logger.debug(\n \"Picked AuthN broker for ACR %s: %s\" % (str(acr), str(res))\n )\n if res:\n # Return the best guess by pick.\n return res[0]\n\n except KeyError as exc:\n logger.debug(\n \"An error occured while picking the authN broker: %s\" % str(exc)\n )\n\n # return the best I have\n return None, None", "def decode_auth_token(auth_token): \n try: \n payload = jwt.decode(auth_token, getattr(settings, \"SECRET_KEY\", \"\"),algorithms=['HS256']) \n is_blacklisted_token = User.check_blacklist(auth_token)\n if is_blacklisted_token:\n return False,'Token blacklisted. Please log in again.'\n else:\n return True, payload['sub']\n except jwt.ExpiredSignatureError:\n return False,'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return False,'Invalid token. 
Please log in again.'", "def test_list_o_auth_authorize_token(self):\n pass", "def mifare_auth_a(self,address,key_a):\n if self._uid == False:\n raise RuntimeError(\"No Mifare card currently activated.\")\n if len(self._uid) == 4:\n uid = self._uid\n elif len(self._uid) == 7: # 10-byte UID cards don't exist yet.\n uid = self._uid[3:7] # Sequence 1, keep it simple.\n self.in_data_exchange(bytearray([MIFARE_COMMAND_AUTH_A,address]) + key_a + uid)", "def verify_code(email, val):\r\n # TODO: is this the right string?\r\n verification_string = email.lower() + '|' + val\r\n return hashlib.md5(verification_string).hexdigest()", "def auth_check():\n try:\n access_token = session_get('access_token')\n access_token_secret = session_get('access_token_secret')\n\n if access_token and access_token_secret:\n tk = get_twitter_keys()\n client = UserClient(\n tk.consumer_key,\n tk.consumer_secret,\n access_token=access_token,\n access_token_secret=access_token_secret)\n \"\"\"\n We need to make a call to verify_credentials in case the user\n has revoked access for this application. This is a rate-limited\n call and so this approach might not be ideal. If we end up\n having rate-limiting problems, we might try giving each user\n a unique application ID that is kept in local storage and used\n as a lookup for Twitter creds (vs. session data which is domain-\n specific and thus problematic for our extension-approach). This\n might allow us to consolidate Twitter creds per user rather than\n storing them for each domain visited.\"\"\"\n verif = client.api.account.verify_credentials.get()\n if verif.headers['status'].split()[0] == '200':\n return jsonify({'is_auth': 1})\n else:\n # possibly revoked access, although this will probably\n # get handled by the TwitterAuthError catch\n remove_session_credentials()\n return jsonify({'is_auth': 0})\n tk = get_twitter_keys()\n client = UserClient(tk.consumer_key, tk.consumer_secret)\n callback = 'http://'+request.host+url_for('auth_verify')\n token = client.get_authorize_token(callback)\n session_set('auth_token', token.oauth_token)\n session_set('auth_token_secret', token.oauth_token_secret)\n session_set('auth_redirect',\n request.args.get('redirect') or '')\n if (\n 'html' in request.headers['Accept']\n and request.args.get('_format') != 'json'):\n return redirect(token.auth_url)\n else:\n data = {'is_auth': 0, 'auth_url': token.auth_url}\n return jsonify(data)\n except TwitterAuthError:\n remove_session_credentials()\n return jsonify({'is_auth': 0})\n except Exception, e:\n traceback.print_exc()\n return jsonify({'error': str(e)})", "def auth_required(handler_method):\n\n def check_auth(self, *args):\n self.userid, self.credentials = load_session_credentials(self)\n self.mirror_service = create_service('mirror', 'v1', self.credentials)\n # TODO: Also check that credentials are still valid.\n if self.credentials:\n try:\n self.credentials.refresh(httplib2.Http())\n return handler_method(self, *args)\n except AccessTokenRefreshError:\n # Access has been revoked.\n store_userid(self, '')\n credentials_entity = Credentials.get_by_key_name(self.userid)\n if credentials_entity:\n credentials_entity.delete()\n self.redirect('/auth')\n return check_auth", "def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode invalid\"\n return rune.are_restrictions_met(values)", "def 
authn_and_authz(authn_callback=None, authz_callback=None):\n authentication(authn_callback)\n authorization(authz_callback)", "def openid_check_authentication(self, request):\n return request.answer(self.signatory)", "def verifyHashcode(digest):\n list_str = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n list_num = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ]\n \n total = 0\n for i2 in range(len(digest)):\n digest_i = digest[i2]\n #print(\"digest_i =\", digest_i)\n \n for i1 in range(16):\n if digest_i == list_str[i1] and i2 != 0:\n total += list_num[i1]\n #print(\"total =\", total)\n #print(\"list_num[i1] =\", list_num[i1])\n continue\n \n #print(\"--- --- ---\")\n \n #print(\"total =\", total)\n \n checknum = total % 16\n #print(\"checknum =\", checknum)\n \n checkstr = list_str[checknum]\n #print(\"checkstr =\", checkstr)\n \n checkorg = digest[0]\n #print(\"checkorg =\", checkorg)\n \n if checkorg == checkstr:\n isValid = True\n else:\n isValid = False\n \n return isValid", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n return_value = True\n return return_value" ]
[ "0.6526628", "0.6414991", "0.60478854", "0.5907109", "0.5845477", "0.57069147", "0.5651448", "0.55606484", "0.5527734", "0.546124", "0.54302317", "0.54030454", "0.53964144", "0.5381553", "0.53508997", "0.5312594", "0.52762026", "0.52541846", "0.52534354", "0.5245636", "0.5223989", "0.5218349", "0.52109975", "0.5181178", "0.51685584", "0.5165509", "0.5163951", "0.51636577", "0.51560736", "0.5153291", "0.51483697", "0.51402074", "0.51370484", "0.5134245", "0.5117096", "0.5111856", "0.5105167", "0.50981563", "0.5095471", "0.50949293", "0.5087478", "0.50672317", "0.5066118", "0.50638556", "0.5061846", "0.50558037", "0.505505", "0.5046204", "0.50438356", "0.50334746", "0.5031505", "0.5031397", "0.5027441", "0.5027417", "0.5023107", "0.50170106", "0.50147164", "0.5011807", "0.5007207", "0.4997939", "0.4993145", "0.49920166", "0.49883145", "0.4986532", "0.49848908", "0.49840164", "0.4981671", "0.49787822", "0.49404213", "0.49380755", "0.49111834", "0.49111834", "0.4906674", "0.49063456", "0.4903208", "0.49009648", "0.49009648", "0.49009648", "0.49009648", "0.49009648", "0.49009648", "0.49009648", "0.49009648", "0.48883575", "0.4884616", "0.4875788", "0.4870035", "0.48688415", "0.48661023", "0.48562127", "0.4848194", "0.48480648", "0.48479375", "0.4839453", "0.48390722", "0.48320073", "0.4831769", "0.48251322", "0.48243427", "0.48228472" ]
0.5466636
9
All-in-one check that a runestring is valid, derives from this MasterRune and passes all its conditions against the given dictionary of values or callables
def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:
    try:
        rune = Rune.from_base64(b64str)
    except:  # noqa: E722
        return False, "runestring invalid"
    if not self.is_rune_authorized(rune):
        return False, "rune authcode invalid"
    return rune.are_restrictions_met(values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n return MasterRune(secret).check_with_reason(b64str, values)", "def test_any_rune(self):\n rule = 'alert (name:\"rune\"; regex:\".{64}\";)'\n\n tests = {\n \"A\"*64: [\"proxying connection from\", \"INFO : filter matched: 'rune'\"],\n \"\\x90\"*64: [\"proxying connection from\"],\n }\n\n self.run_rules(rule, tests, echo=False)", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))", "def _valid_(s) :\n return LoKi.Dicts.MCFinderDicts.valid (s)", "def is_key_valid(self,key):\n if not key or any(map(lambda s: s in key,space_chars))\\\n or any(map(lambda s: s in key,bad_chars)):\n return False \n return True", "def Check_is_valid(self, String):\r\n\r\n if self.Special_Names.__contains__(String):\r\n return False\r\n elif self.Special_Names_no_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_one_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_two_Operands.__contains__(String):\r\n return False\r\n elif self.Data_types.__contains__(String):\r\n return False\r\n elif self.Registers.__contains__(String):\r\n return False\r\n elif self.Irvine32_functions.__contains__(String):\r\n return False\r\n elif String.__contains__('\"'):\r\n return False\r\n elif String.__contains__('\\''):\r\n return False\r\n elif String.__contains__('.'):\r\n return False\r\n elif String[0].isdecimal():\r\n return False\r\n if len(self.Data_variables) > 0:\r\n if self.Data_variables.__contains__(String):\r\n return False\r\n if len(self.Functions_names) > 0:\r\n if self.Functions_names.__contains__(String):\r\n return False\r\n if len(self.Labels_names) > 0:\r\n if self.Labels_names.__contains__(String):\r\n return False\r\n return True", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)", "def is_valid(teorema, args):\n if args.ignore_case:\n for value in teorema.values():\n if args.pattern.lower() in value.lower():\n return True\n else:\n for value in teorema.values():\n if args.pattern in value:\n return True\n\n return False", "def test_should_raise_in_case_of_wrong_characters(self):\n validator = CharCombinationValidator()\n\n regex = re.compile(r'[\\(\\[\\{]\\)\\]\\}')\n forbidden_chars = regex.sub('', punctuation)\n for char in forbidden_chars:\n with self.assertRaises(FormulaValidationError):\n validator('Fe(O)2%s' % char)", "def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def validate(cls, tab_dict, raise_error=True):\r\n return key_checker(['type'])(tab_dict, raise_error)", "def validate(self, s):\n if len(s) == 0:\n return False\n if s in self.whitelist:\n return True\n if s in self.blacklist:\n return False\n\n # SQL Types are rarely used\n if 't' in s and 'f(t' not in s and 'At' not in s:\n return False\n\n if '1nf' in s:\n return False\n if 's1o' in s:\n return False\n if 'oo' in s:\n return False\n if 'v,s' in s:\n return False\n if 's,v' in s:\n return False\n if 'v,v' in s:\n return False\n if 'v,1' in s:\n return False\n if 'v,n' in s:\n return False\n if 'n,v' in s:\n return False\n if '1,v' in s:\n return False\n if 'Eo(' in s:\n 
return False\n if '(o(' in s:\n return False\n if '(o1' in s:\n return False\n if '(on' in s:\n return False\n if '(os' in s:\n return False\n if '(of' in s:\n return False\n if '(ov' in s:\n return False\n if 'B(n)' in s:\n return False\n if 'oso' in s:\n return False\n if 'o1o' in s:\n return False\n if 'ono' in s:\n return False\n\n # only 1 special case for this\n # 1;foo:goto foo\n # 1;n:k\n # the 'foo' can only be a 'n' type\n if ':' in s and not 'n:' in s:\n return False\n\n if '11' in s:\n return False\n\n if '))' in s:\n return False\n if '((' in s:\n return False\n if 'v1' in s:\n return False\n\n if 'nv' in s and ';T' not in s:\n return False\n if 'nn' in s and ';T' not in s:\n return False\n\n # select @version foo is legit\n # but unlikely anywhere else\n if 'vn' in s and 'Evn' not in s:\n return False\n\n if 'oE' in s:\n return False\n\n if 'A1' in s:\n return False\n if 'An' in s:\n return False\n if 'A(1' in s:\n return False\n\n if 'vov' in s:\n return False\n if 'vo1' in s:\n return False\n if 'von' in s:\n return False\n\n if 'ns' in s:\n if 'U' in s:\n return True\n if 'T' in s:\n return True\n return False\n\n if 'sn' in s:\n # that is... Tsn is ok\n if s.find('T') != -1 and s.find('T') < s.find('sn'):\n return True\n return False\n\n # select foo (as) bar is only nn type i know\n if 'nn' in s and 'Enn' not in s and ';T' not in s:\n return False\n\n if ',o' in s:\n return False\n\n if 'kk' in s and 'Tkk' not in s:\n return False\n\n if 'ss' in s:\n return False\n\n if 'ff' in s:\n return False\n\n if '1no' in s:\n return False\n\n if 'kno' in s:\n return False\n\n if 'nEk' in s:\n return False\n\n if 'n(n' in s:\n return False\n if '1so' in s:\n return False\n if '1s1' in s:\n return False\n if 'noo' in s:\n return False\n if 'ooo' in s:\n return False\n\n if 'vvv' in s:\n return False\n\n if '1vn' in s:\n return False\n if '1n1' in s:\n return False\n if '&1n' in s:\n return False\n if '&1v' in s:\n return False\n if '&1s' in s:\n return False\n if 'nnk' in s:\n return False\n if 'n1f' in s:\n return False\n # folded away\n if s.startswith('('):\n return False\n\n if '&o' in s:\n return False\n\n if '1,1' in s:\n return False\n if '1,s' in s:\n return False\n if '1,n' in s:\n return False\n if 's,1' in s:\n return False\n if 's,s' in s:\n return False\n if 's,n' in s:\n return False\n if 'n,1' in s:\n return False\n if 'n,s' in s:\n return False\n if 'n,n' in s:\n return False\n if '1o1' in s:\n return False\n if '1on' in s:\n return False\n if 'no1' in s:\n return False\n if 'non' in s:\n return False\n if '1(v' in s:\n return False\n if '1(n' in s:\n return False\n if '1(s' in s:\n return False\n if '1(1' in s:\n return False\n if 's(s' in s:\n return False\n if 's(n' in s:\n return False\n if 's(1' in s:\n return False\n if 's(v' in s:\n return False\n if 'v(s' in s:\n return False\n if 'v(n' in s:\n return False\n if 'v(1' in s:\n return False\n if 'v(v' in s:\n return False\n\n if s.startswith('n('):\n return False\n\n if s.startswith('vs'):\n return False\n\n if s.startswith('o'):\n return False\n\n if ')(' in s:\n return False\n\n # need to investigate T(vv) to see\n # if it's correct\n if 'vv' in s and s != 'T(vv)':\n return False\n\n # unlikely to be sqli but case FP\n if s in ('so1n)', 'sonoE'):\n return False\n\n return True", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, 
code=self.code)", "def condition_is_valid(self):\n cond = self.condition.lower()\n cond = re.sub('\\s+', ' ', cond)\n\n for ap in self.available_functions:\n ap = ap.lower()\n\n ret = re.search(ap, cond)\n if ret:\n # [('a', 'b'), ('a', 'b'), ...]\n self.used_functions[ap] = re.findall(ap, cond)\n cond = re.sub(ap, ' ', cond)\n\n # print self.used_functions\n for op in ['and', 'or', 'not']:\n cond = re.sub('\\s%s\\s' % op, ' ', cond)\n\n cond = re.sub('\\(', '', cond)\n cond = re.sub('\\)', '', cond)\n cond = re.sub('\\s+', '', cond)\n\n return len(cond) == 0", "def test_accept_letter_unmasked_masked(self):\n # Prepare test\n letter_a = 'a'\n letter_b = 'b'\n self.console.in_valid_letter = MagicMock(return_value=letter_b)\n self.console.word.is_masked.side_effect = [False, True, StopIteration]\n\n # Run test\n result = self.console.accept_letter(letter_a)\n\n # Evaluate test\n self.assertEqual(letter_b, result)", "def is_valid(self, string) -> bool:\n while '()' in string or '{}' in string or '[]' in string:\n string = string.replace('()', '').replace('[]', '').replace('{}', '')\n\n return len(string) == 0", "def test_accept_letter_masked(self):\n # Prepare test\n letter = 'a'\n self.console.in_valid_letter = MagicMock(return_value='b')\n self.console.word.is_masked.return_value = True\n\n # Run test\n result = self.console.accept_letter(letter)\n\n # Evaluate test\n self.assertEqual(letter, result)", "def _validate_string(self, path, value, value_is_key=False):\r\n value = re.sub('[/$#{}._|*=\\-]', ' ', value)\r\n\r\n tokens = nltk.tokenize.word_tokenize(value)\r\n for raw_token in tokens:\r\n if raw_token.startswith(\"'\"):\r\n raw_token = raw_token[1:]\r\n if self.corpus.validate_token(raw_token):\r\n continue\r\n sub_tokens = Validator.camel_case_split(raw_token)\r\n ret = True\r\n for sub_token in sub_tokens:\r\n ret = ret and self.corpus.validate_token(sub_token)\r\n\r\n if not ret:\r\n self.errors.append({\r\n \"isKey\": value_is_key,\r\n \"path\": path,\r\n \"typo\": raw_token,\r\n })", "def _security_check_parameters(param_dict):\n for key, value in param_dict.iteritems():\n str_value = str(value) # Could easily be an int or a float\n for bad_str in [\";\", \"&&\", \">\", \"<\", \"|\"]:\n if bad_str in str_value:\n raise ValueError(\"Rejecting suspicious argument for %s\" % key)", "def test_valid_str(self):\n try:\n lowercase_validator('hg213i75%^&$efg')\n except ValidationError:\n self.fail('String raised ValidationError unexpectedly')", "def check_character(char, name, parameters):\r\n if char in name:\r\n raise NameError('Invalid character in the variable name: ' + name)\r\n\r\n # Make sure people don't include # within the name of parameters\r\n for item in parameters.keys():\r\n if char in item:\r\n raise NameError('Invalid character in the variable parameters: ' + item)", "def should_run(self, case: Tuple[Dict[str, Any], ...]) -> bool:\n return True", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def __allowed_values_correct_string(self):\n strTestName = 'Values of a string (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'string')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramAllowed('parameter1', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 'Allowed string #2'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def valid(f):\n try:\n return not 
re.search(r'\\b0[0-9]', f) and eval(f) is True\n # \"\\b\" is a word boundary\n \"\"\"We need to exclude numbers starting with zero,\n as these are interpretted as base8 (octal). This in\n turn could cause interpretation errors, and exceptions\n (for example 09 is not octal and will throw and exception)\"\"\"\n except (ArithmeticError, SyntaxError):\n return False", "def check_redditor(self, args):\n\n for user in args.redditor:\n if any(char.isalpha() for char in user[1]) \\\n or self._illegal_chars.search(user[1]) != None \\\n or int(user[1]) == 0:\n raise ValueError", "def test_valid_alpha(alpha: Any) -> None:\n check_alpha(alpha=alpha)", "def test_allowed_string(self):\n val = DwcaValidator(yaml.load(self.yaml_allowed_string, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': 'female'}\n self.assertFalse(val.validate(document))", "def validate_strength(cls, value: str) -> (bool, dict):\n if value is None:\n return False, {}\n\n length = cls._validate_length(value)\n digit = cls._validate_digit(value)\n uppercase = cls._validate_uppercase(value)\n lowercase = cls._validate_lowercase(value)\n symbol = cls._validate_symbol(value)\n\n valid = all([length, digit, uppercase, lowercase, symbol])\n error_dict = {\n 'length': length,\n 'digit': digit,\n 'uppercase': uppercase,\n 'lowercase': lowercase,\n 'symbol': symbol,\n }\n\n return valid, error_dict", "def is_valid_char(t_char):\r\n eax = 1 # mi preparo il flag \"invalido\" per il carattere\r\n \r\n # se il carattere e' un operatore, un operando o uno spazio\r\n # il carattere e' valido\r\n if is_operator(t_char) == 0:\r\n # e' operatore\r\n eax = 0\r\n \r\n if is_operand(t_char) == 0:\r\n # e' operando\r\n eax = 0\r\n \r\n if ord(t_char) == 32:\r\n # e' uno spazio\r\n eax = 0\r\n\r\n return eax", "def _check_value(self):\n value = str(self._value_field.toPlainText())\n if value=='': return True\n ACCEPTABLES_CHARS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n '.', ',', ';', ' ', '\\n', '-')\n\n for char in value:\n if not char in ACCEPTABLES_CHARS:\n return False\n if Variable.is_acceptable_arg(value):\n rows, columns = np.matrix(value).shape\n return 1 <= rows <= 4 and 1 <= columns <= 4\n else:\n return False", "def check(chname='Bu'):\r\n def sval(vname):\r\n sname = chname+str(vname)\r\n v = eval(sname)\r\n return v\r\n print ' value fdfs ',fdfs\r\n print ' value for: ',chname\r\n print ' BR ',sval('BR')\r\n print ' Eff rec ',sval('E_rec')\r\n print ' Eff sel ',sval('E_sel')\r\n print ' Eff recsel ',sval('E_recsel')\r\n print ' Eff trg ',sval('E_trg')\r\n print ' Eff (Bmm) recsel ',BmmE_recsel\r\n print ' Eff (Bmm) trg ',BmmE_trg\r\n print ' Rat E recsel ',sval('RatE_recsel')\r\n print ' Rat E trg ',sval('RatE_trg')\r\n print ' Ncan ',sval('Ncan')\r\n print ' alpha-nude ',sval('')\r\n print ' alpha_d ',sval('ad')\r\n print ' alpha_s ',sval('as')\r\n print ' cpar_toys ',sval('b')", "def is_valid(self):\n\n # Test whether every element in required_keys is in actual_keys\n actual_keys = set(self.fields.keys())\n required_keys = set(self.required_keys)\n has_required_keys = required_keys <= actual_keys\n if not has_required_keys:\n return False\n\n # TODO: Complete the following block. 
\n\n # Assume all is valid at first, then as soon as one invalid\n # is detected, whole thing becomes invalid.\n all_valid = True \n\n # Now iterate over each key-value pair to check\n for key, value in self.fields.items():\n if key == 'byr':\n this_key_valid = len(str(value)) == 4 and (1920 <= value <= 2002)\n all_valid = all_valid and this_key_valid\n if key == 'iyr':\n this_key_valid = len(str(value)) == 4 and (2010 <= value <= 2020)\n all_valid = all_valid and this_key_valid\n if key == 'eyr':\n this_key_valid = len(str(value)) == 4 and (2020 <= value <= 2030)\n all_valid = all_valid and this_key_valid\n if key == 'hgt':\n if len(str(value)) < 4:\n all_valid = False\n else:\n ending = value[-2:]\n num = int(value[:-2])\n this_key_valid = (ending == 'in' and (59 <= num <= 76)) or (ending == 'cm' and (150 <= num <= 193))\n all_valid = all_valid and this_key_valid\n if key == 'hcl':\n re_str = '#[0-9a-f]{6}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 7\n all_valid = all_valid and this_key_valid\n if key == 'ecl':\n this_key_valid = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n all_valid = all_valid and this_key_valid\n if key == 'pid':\n re_str = '[0-9]{9}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 9\n all_valid = all_valid and this_key_valid\n if key == 'cid':\n this_key_valid = True\n all_valid = all_valid and this_key_valid\n\n # If all fields are valid, return True\n return all_valid", "def check_string( pname, use ):\n for l in pname:\n if l in string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True", "def test_letters(self):\n self.assertFalse(validate_measure_input('a', self.measures))\n self.assertFalse(validate_measure_input('1a', self.measures))", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def __check_validation(input_string):\n if not input_string:\n raise NullInputException(\"Input string should be not empty\")\n if type(input_string) != str:\n raise NonStringInputException(\"Input value should be a string\")\n if len(input_string) >= 200:\n raise TooLongInputException(\"Input string should be less than 200 characters\")\n for i in input_string:\n if not i.isalpha():\n raise NonStringInputException(\"All input value characters should be an alpha\")", "def validate_required_string(dictionary, dict_name, value, yaml_file):\n\n validate_dict_contains_value(dictionary, dict_name, value, yaml_file)\n validate_type(dictionary[value], value, str, 'str', yaml_file)\n del dictionary[value]", "def test_in_valid_letter(self):\n # Prepare test\n letters = ['ah', '1', 'A']\n 
self.console.input = MagicMock(side_effect=letters)\n\n # Run test\n result = self.console.in_valid_letter()\n\n # Evaluate test\n self.assertEqual(letters[2].lower(), result)", "def is_valid_query(query: Dict[str, Any]) -> bool:\n for name, value in query.items():\n if is_illegal_surrogate(name) or is_illegal_surrogate(value):\n return False\n return True", "def test_can_match(self):\n assert self.RNA(\"\").can_match(\"\")\n assert self.RNA(\"UCAG\").can_match(\"UCAG\")\n assert not self.RNA(\"UCAG\").can_match(\"ucag\")\n assert self.RNA(\"UCAG\").can_match(\"NNNN\")\n assert self.RNA(\"NNNN\").can_match(\"UCAG\")\n assert self.RNA(\"NNNN\").can_match(\"NNNN\")\n assert not self.RNA(\"N\").can_match(\"x\")\n assert not self.RNA(\"N\").can_match(\"-\")\n assert self.RNA(\"UCAG\").can_match(\"YYRR\")\n assert self.RNA(\"UCAG\").can_match(\"KMWS\")", "def valid(f):\n try: \n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False", "def valid(f):\n try: \n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False", "def properDayInput(day):\r\n possibleStrings = [\"m\",\"mon\",\"monday\",\"tu\",\"tue\",\"tues\",\"tuesday\",\"w\",\r\n \"we\",\"wed\",\"wednesday\",\"th\",\"tr\",\"r\", \"thu\",\"thur\",\"thurs\",\"thursday\",\"f\",\"fr\",\r\n \"fri\",\"friday\",\"sa\",\"sat\",\"saturday\",\"su\",\"sun\",\"sunday\"]\r\n \r\n validString = False\r\n for i in range(0, len(possibleStrings)):\r\n if possibleStrings[i] == day.lower():\r\n validString = True\r\n return validString", "def test_invalid_str(self):\n with self.assertRaises(ValidationError):\n lowercase_validator('hg213i75%^&$Efg')", "def test_special_case(self):\n cases = [\n ('3467875434578764345789654', False),\n ('AAAAAAAAAAA', False),\n ('', False),\n ]\n for titulo_eleitoral, is_valid in cases:\n self.assertEqual(self.titulo_eleitoral.validate(titulo_eleitoral), is_valid)", "def test_validation(self):\r\n\r\n with self.assertRaises(ValidationError):\r\n Dictionary().validate([1,2,3])\r\n\r\n with self.assertRaises(ValidationError):\r\n Dictionary().validate('stringy')\r\n\r\n with self.assertRaises(ValidationError):\r\n Dictionary().validate(1)", "def run_main(raw, con, inl, mon, sub):\n # Ahmad please put (or import/call) your code here\n GOValid_func(raw,con,inl,mon,sub)", "def validate(self, params: Dict = None) -> None:\n masking_char = params.get(self.MASKING_CHAR)\n validate_parameter(masking_char, self.MASKING_CHAR, str)\n if len(masking_char) > 1:\n raise InvalidParamException(\n f\"Invalid input, {self.MASKING_CHAR} must be a character\"\n )\n\n validate_parameter(params.get(self.CHARS_TO_MASK), self.CHARS_TO_MASK, int)\n validate_parameter(params.get(self.FROM_END), self.FROM_END, bool)", "def handle_pure_literals(cnf):\n pure = dict()\n for clause in cnf:\n for lit in clause:\n match = pure.get(lit.name, None)\n if type(match) == type(lit) and not match.equals(lit):\n pure[lit.name] = \"invalid\"\n if not match:\n pure[lit.name] = lit\n\n for lit in pure.values():\n if lit == \"invalid\":\n continue\n cnf = simplify(cnf, lit) \n return cnf", "def eval_clue(self, clue):\n clue_type = clue[\"type\"]\n clue_args = clue[\"vals\"]\n if (clue_type == SAME):\n return self.are_same(clue_args[0], clue_args[1])\n elif (clue_type == NOTSAME):\n return self.are_not(clue_args[0], clue_args[1])\n elif (clue_type == XAWAY):\n return self.is_x_away(clue_args[0], clue_args[1], clue_args[2])\n elif (clue_type == ISAT):\n return self.is_at(clue_args[0], 
clue_args[1])\n elif (clue_type == NOTAT):\n return self.is_not_at(clue_args[0], clue_args[1])\n else:\n raise Exception(\"Invalid clue type \", clue[\"type\"])", "def test_validation(self):\n\n with self.assertRaises(ValidationError):\n Dictionary().validate([1,2,3])\n\n with self.assertRaises(ValidationError):\n Dictionary().validate('stringy')\n\n with self.assertRaises(ValidationError):\n Dictionary().validate(1)", "def _check_location_part(cls, val, regexp):\r\n if val is None:\r\n return\r\n\r\n if not isinstance(val, basestring):\r\n raise InvalidKeyError(cls, \"{!r} is not a string\".format(val))\r\n\r\n if regexp.search(val) is not None:\r\n raise InvalidKeyError(cls, \"Invalid characters in {!r}.\".format(val))", "def is_valid_input(letter_guessed):\n\n #calulate the lengh of the letters\n NumberOfCharacters = (len(letter_guessed))\n #convert input letters to Underscore\n NumberOfUnderscore = (NumberOfCharacters * \"_\")\n\n\n # All the letters in English\n EnglishLetter = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMOPQRSTUVWXYZ\"\n\n\n if NumberOfCharacters > 1:\n print(\"false\")\n\n # If the user entered English character the string will print the character a non-English character (for example, a sign such as: &, *), the string will print \"E2\n elif letter_guessed in EnglishLetter:\n print(\"true\")\n else:\n print(\"false\")", "def validate(self, text: Union[str, Text, None] = None) -> None:\n text = text or self.text\n if isinstance(text, str):\n text = TNTParser().parse(text)\n if text is not None:\n self.text = text\n try:\n for i, (lineno, line) in enumerate(text.items()):\n getattr(self, 'rule_' + line.rule.value\\\n .lower().replace(' ', '_').replace('-', '_'),\n self.rule_invalid)(i, line)\n except ProofMistake as exc:\n exc.args = (f\"line {line.lineno}: '{str(line)}' \" + exc.args[0], *exc.args[1:])\n raise", "def test_dna_validator(self):\n \n dna = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test invalid characters\n invalid_dna1 = 'EETGGAGACGGAAACASTCCGAGGACATCCGGAGGAACCCGGGGAGTZVTHHCTGAGTGGTAAT'\n # test invalid length\n invalid_dna2 = 'GGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test for invalid internal stop\n invalid_dna3 = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTTGAGTGGTAATC'\n expected_validationT = True\n expected_validationF = False\n result_validation1 = dna_validator(dna)\n self.assertEqual(result_validation1, expected_validationT)\n result_validation2 = dna_validator(invalid_dna1)\n self.assertEqual(result_validation2, expected_validationF)\n result_validation3 = dna_validator(invalid_dna2)\n self.assertEqual(result_validation3, expected_validationF)\n result_validation4 = dna_validator(invalid_dna3)\n self.assertEqual(result_validation4, expected_validationF)", "def test_valid_text_str(self):\n f = lws.valid_text\n assert f('string', r'[a-z]*') is True\n assert f('string', r'string') is True\n assert f('string', r'[0-9]*') is False\n assert f('', r'.*') is False\n assert f('abcde', lambda x: 'e' in x) is True\n assert f('abcde', lambda x: 'f' in x) is False", "def validate(data, badchars):\n assert(all(b not in data for b in badchars))", "def verify_rpc_value ( user_dict ):\n for key in user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )", "def _is_valid_key(self, key):\n\t\t\n\t\t# If the key is not a string\n\t\tif not isinstance(key, str):\n\t\t\treturn False\n\t\telse:\n\t\t\tkey = str.upper(key)\n\t\t\n\t\t# If 
the given key does not match the standard notation XY\n\t\tif len(key) != 2:\n\t\t\treturn False\n\t\t\n\t\t# If the key is out of the board\n\t\tif key[0] not in self.columns or key[1] not in self.rows:\n\t\t\treturn False\n\t\t\n\t\t# Otherwise the key is valid\n\t\treturn True", "def valid(f):\r\n try:\r\n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\r\n except ArithmeticError:\r\n return False", "def vowel_check(char) : \n \n try :\n if len(char) > 1 :\n print(\"Plase enter a string of length 1.\")\n return \"None\"\n \n if char in 'aeiou' :\n return 'TRUE'\n else :\n return 'FALSE'\n except :\n pass", "def __allowed_values_inccorrect_string(self):\n strTestName = 'Values of a string (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n\n RxCSObject.paramAddMan('parameter3', 'string')\n RxCSObject.paramType('parameter3', str)\n RxCSObject.paramAllowed('parameter3', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 21\n RxCSObject.parameter3 = 'Allowed string #3'\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def run_checks(entity, string):\n checks = defaultdict(list)\n\n # Prevent empty translation submissions if supported\n if string == \"\" and entity.resource.allows_empty_translations:\n checks[\"pndbWarnings\"].append(\"Empty translation\")\n\n return checks", "def test_valid_key(self):\n f = lws.valid_data_key\n assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True", "def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. 
\r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True", "def test_valid(self):\n template = '{0} just right {1}'\n value_count = 2\n try:\n validate_str_substitution(template, value_count)\n except ValidationError:\n self.fail('Name raised ValidationError unexpectedly')", "def _check_logic_syntax(string):\n return logExp.matches(string)", "def valid(f):\n try:\n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False", "def __init__(self, targetChar, guessChar, leftChar=None, rightChar=None):\r\n self.guessChar = guessChar\r\n self.result = self.validateCharacter(targetChar, guessChar, leftChar=leftChar, rightChar=rightChar)", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def _is_valid_key(self, key):\r\n\r\n # Check the length\r\n if len(key) > 250:\r\n return False\r\n\r\n # Check that there are no spaces or control characters\r\n for char in key:\r\n if ord(char) < 33 or ord(char) == 127:\r\n return False\r\n\r\n return True", "def validate(prop, string, node, match, entry_start, entry_end):\n return True", "def string_validate_regular_expression(cls, value):\n if value is None:\n return value\n\n if not re.match(r\"[a-z]\", value ,re.IGNORECASE):\n raise ValueError(r\"must validate the regular expression /[a-z]/i\")\n return value", "def test_true_lower(self):\n self.assertRaises(ParseException, self.flag.parseString, 'y')", "def validate(name, bracket, bracket_side, bfr):\n\n return bfr[bracket.begin:bracket.end].islower()", "def testCheck(self):\r\n from pydsl.Check import RegularExpressionChecker\r\n input_str = \"abc\"\r\n checker = RegularExpressionChecker(input_str)\r\n self.assertTrue(checker.check(input_str))\r\n self.assertTrue(checker.check([x for x in input_str]))\r\n self.assertTrue(checker.check([x for x in input_str]))\r\n self.assertTrue(checker.check(input_str))\r\n self.assertFalse(checker.check(\"abd\"))\r\n self.assertFalse(checker.check(\"\"))", "def is_valid_raw(command): \n # default state\n valid = True\n \n # split the command into sections\n data_list = command[:-1].split(' ')\n \n # check the command's validity\n if (len(data_list) < 3) or ((data_list[0] != '<READ') and \\\n (data_list[0] != '<WRITE')):\n # if the command is too long and doesn't start corectly then it is \n # invalid \n valid = False\n \n elif (len(data_list[1]) != 5) or not data_list[1].startswith('0x'):\n # if the address field is not the right length and doesnt start \n # wit the hexidecimal identifier then it is invalid\n valid = False\n \n elif (data_list[1][4] != ',') or not is_hex(data_list[1][2:-1]):\n # if the address doean't end with a comma or the number portion is \n # not a hexideciaml number then it is invalid\n valid = False\n \n elif ('WRITE' in data_list[0]) and \\\n any([not is_hex(item) for item in data_list[2:]]):\n # if it is a write command and any item in the data list is not\n # hexidecimal then it is invalid\n valid = False\n \n elif ('READ' in data_list[0]) and \\\n (len(data_list) != 3 or not data_list[2].isdigit()):\n # if it is a read command and there in not a single decimal length\n # specified then the command is invalid\n valid = False \n \n # end if\n \n # print errors associated with commands if required\n if ('READ' in command) and not valid:\n print '*** Invalid READ command, please refer to the'\\\n 'Read me for proper syntax ***' \n \n elif ('WRITE' in command) and not valid:\n print '*** Invalid WRITE command, please refer to 
the'\\\n 'Read me for proper syntax ***' \n # end if\n \n return valid", "def is_valid(self, name, value, returnError = False):\n convert = self.expected[name]\n if convert == str:\n # only worry about int, list, etc;\n return True\n try:\n val = ast.literal_eval(value)\n '''\n Safely evaluate an expression node or a Unicode or Latin-1 encoded string containing a Python literal or container display.\n The string or node provided may only consist of the following\n Python literal structures: strings, numbers, tuples, lists, dicts, booleans, and None.\n '''\n except Exception, e:\n if returnError:\n return e\n else:\n print \"Tried %s(%s), but failed.\" % (convert.__name__, value)\n print e\n return False\n else:\n return type(val) == convert", "def validate(self, key, val):\n return True", "def validate(self, key, val):\n return True", "def test_words_with_numbers(self):\n\n test_string = \"1. FC Köln\"\n test_anagram = \"anagram\"\n with pytest.raises(ValueError) as exc_info:\n is_anagram(test_string, test_anagram)\n expected_error_msg = \"should only contain letters!\"\n assert exc_info.match(expected_error_msg)", "def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False", "def _CheckExceptionTerm(self, term, rules):\n flag = False\n for keyword in rules:\n if rules[keyword] == 'starts':\n flag = flag or term.startswith(keyword)\n if rules[keyword] == 'ends':\n flag = flag or term.endswith(keyword)\n if rules[keyword] == 'contains':\n flag = flag or (keyword in term)\n return flag", "def __call__(self, value): # noqa: D102\n if not isinstance(value, str):\n raise ValueError(f\"Input value must be a string. '{value}' is not.\")\n\n raw_value = value\n for c in self.remove_characters:\n value = value.replace(c, \"\")\n if not bool(re.match(f\"^[{self.allowed_characters},]+$\", value)):\n raise ValueError(\n f\"Input must only contain values '{self.allowed_characters},'. '{raw_value}' does not.\"\n )\n if not bool(\n re.match(\n f\"^([{self.allowed_characters}],)+[{self.allowed_characters}]$\", value\n )\n ):\n raise ValueError(\n f\"Input must have format '(?,?,?,?)'. '{raw_value}' does not.\"\n )\n if not all([c in value for c in self.required_characters]):\n raise ValueError(\n f\"Input must contain {self.required_characters}. 
'{raw_value}' does not.\"\n )\n return raw_value", "def main():\n basedir = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(basedir, 'input')\n\n valid_strings = 0\n\n with open(file_path, 'r') as input_file:\n for line in input_file:\n if validate(line):\n valid_strings += 1\n\n print \"Found {} valid strings.\".format(valid_strings)\n\n assert valid_strings == 55", "def checkValidString(self, string: str) -> bool:\n @lru_cache(None)\n def dp(index, diff):\n \"\"\"\n index is the index of string\n diff the counts of '(' - counts of ')'\n \"\"\"\n\n if index == len_s:\n return diff == 0\n if abs(diff) > len_s - index:\n return False\n c = string[index]\n index += 1\n if c == '(':\n return dp(index, diff + 1)\n elif c == ')':\n if diff - 1 < 0:\n return False\n return dp(index, diff - 1)\n else:\n return dp(index, diff + 1) or dp(index, diff - 1) or dp(index, diff)\n\n len_s = len(string)\n return dp(0, 0)", "def test_must_match(self):\n assert self.RNA(\"\").must_match(\"\")\n assert not self.RNA(\"N\").must_match(\"N\")\n assert not self.RNA(\"R\").must_match(\"R\")\n assert not self.RNA(\"N\").must_match(\"r\")\n assert not self.RNA(\"CGUACGCAN\").must_match(\"CGUACGCAN\")\n assert not self.RNA(\"U\").must_match(\"C\")\n assert not self.RNA(\"UUU\").must_match(\"UUC\")\n assert not self.RNA(\"UUU\").must_match(\"UUY\")\n assert self.RNA(\"UU-\").must_match(\"UU-\")\n assert self.RNA(\"UCAG\").must_match(\"UCAG\")", "def is_valid(t_input):\r\n eax = 1 # flag validita': inizialmente non valido (caso stringa di lunghezza 0)\r\n ecx = 0 # indice\r\n \r\n while t_input[ecx] != \"\\0\":\r\n eax = 1 # mi preparo il flag \"invalido\" per il carattere\r\n\r\n if is_valid_char(t_input[ecx]) == 0:\r\n # carattere valido\r\n eax = 0\r\n\r\n # se il carattere e' invalido\r\n if eax == 1:\r\n # salta fuori dal ciclo\r\n break\r\n\r\n ecx += 1\r\n # salta a inizio ciclo\r\n\r\n # eax e' 1 per stringhe vuote o \r\n # almeno un carattere invalido\r\n return eax", "def test_check_dna_chars_primers(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 's1&data'],\r\n ['s2', 'CGTA', 'AAA1A', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_primers(header, mapping_data, errors)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: AAA1A\\t2,2']\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should be able to suppress LinkerPrimerSequence check, won't\r\n # suppress ReversePrimer check\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'ReversePrimer', 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 'ACGT', 's1&data'],\r\n ['s2', 'CGTA', 'AAA1A', 'ACGTF', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_primers(header, mapping_data, errors,\r\n disable_primer_check=True)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: ACGTF\\t2,3']\r\n\r\n self.assertEqual(errors, expected_errors)", "def _match(self, key, attributes=None, context=None):\n matching_data = Sanitizer.ensure_int(self._get_matcher_input(key, attributes))\n if matching_data is None:\n return False\n return self._lower <= self.input_parsers[self._data_type](matching_data) <= self._upper", "def calc(bot, sender, sendmsg, label, args):\n\n expr = \" \".join(args)\n banned = dir() + dir(builtins)\n for word in banned:\n if word in expr:\n sendmsg(\"Illegal word found: \" + word)\n return\n try:\n sendmsg(eval(expr))\n 
except Exception as e:\n sendmsg(str(e))", "def process_key(self, key, value, fields, rule_processing_key, tag_index):\r\n if fields[key].type == 'function':\r\n if not fields[key].function(rule_processing_key, tag_index):\r\n rule_response = \"Field has Invalid Value:\\t\" + str(rule_processing_key[METADATA][tag_index][key])\r\n return False, rule_response\r\n\r\n elif fields[key].type == 'regex':\r\n fields[key].attributefound()\r\n if re.fullmatch(fields[key].regex, value):\r\n fields[key].attributevalid()\r\n self.required_fields_index[fields[key].position].increment_count()\r\n elif re.fullmatch(fields[key].regex, str(value).upper()):\r\n rule_processing_key[METADATA][tag_index][key] = str(value).upper()\r\n fields[key].attributevalid()\r\n self.required_fields_index[fields[key].position].increment_count()\r\n else:\r\n rule_response = \"Field has Invalid Value:\\t\" + str(rule_processing_key[METADATA][tag_index][key])\r\n fields[key].attributeinvalid()\r\n return False, rule_response\r\n\r\n return True, \"\"", "def test_match_validate_any(self):\n self.analyzer = IBANAnalyzer(None, validate=True)\n self.paste.body = \"Mutlipe IBANS: DE89 3704 0044 0532 0130 00 and FR14 2004 1010 0505 0001 3 should not match\"\n match = self.analyzer.match(self.paste)\n self.assertTrue(match)\n\n # The validate method should filter the wrong FR IBAN out\n self.assertEqual(1, len(match))\n self.assertEqual(\"DE89 3704 0044 0532 0130 00\", match[0])", "def is_rune_authorized(self, other: Rune) -> bool:\n # Make copy, as we're going to update state.\n sha = self.shabase.copy()\n totlen = self.seclen\n for r in other.restrictions:\n pad = end_shastream(totlen)\n sha.update(pad)\n totlen += len(pad)\n enc = bytes(r.encode(), encoding='utf8')\n sha.update(enc)\n totlen += len(enc)\n\n return other.authcode() == sha.digest()", "def run(input_string):\n funclist =[query_is_empty,\n parentheses_are_uneven,\n operators_with_no_words_in_between,\n operator_following_opening_parenthesis_or_before_closing_parenthesis,\n quotation_marks_are_uneven,\n operators_within_exact_phrase,\n distance_must_be_between_1_and_999]\n errorcount = 0\n errorlist = []\n for func in funclist:\n if func(input_string) is False:\n errorcount += 1\n errorlist.append(\"Error: {}\".format(func.__name__))\n if errorcount != 0:\n return \"{} Errors found.\".format(errorcount), errorlist\n else:\n return True, []", "def needs_recoding(strings):\n for string in strings:\n for char in string:\n if 127 < ord(char) < 256:\n return True\n return False", "def test_should_raise_in_case_of_wrong_opening_closing_types(self):\n validator = CharCombinationValidator()\n\n with self.assertRaises(FormulaValidationError):\n validator(self.wrong_opening_closing_types)" ]
[ "0.6080785", "0.5750079", "0.5702773", "0.5655834", "0.54935133", "0.5481613", "0.5304172", "0.52878016", "0.5247526", "0.5234183", "0.523374", "0.52119046", "0.5198196", "0.51774484", "0.5176261", "0.517611", "0.51729107", "0.5167192", "0.51573974", "0.51418495", "0.5130349", "0.5119607", "0.50747323", "0.5071726", "0.50523967", "0.50478077", "0.5045036", "0.5007374", "0.50046164", "0.5002741", "0.49951118", "0.49807507", "0.49805132", "0.4974716", "0.4971016", "0.4969705", "0.49612063", "0.49437794", "0.4940052", "0.49294394", "0.4926871", "0.49220553", "0.492033", "0.49180093", "0.49180093", "0.49158967", "0.49145234", "0.4914149", "0.49120986", "0.49105564", "0.4896326", "0.48952836", "0.48927066", "0.48859656", "0.48859167", "0.48836994", "0.48671207", "0.48628184", "0.48613", "0.485642", "0.48563564", "0.4855318", "0.4853067", "0.48524448", "0.48445195", "0.48429447", "0.4836913", "0.4834013", "0.48255348", "0.48243886", "0.4822956", "0.482271", "0.48149535", "0.48132196", "0.4807802", "0.48056856", "0.47940683", "0.47859013", "0.47848338", "0.4778326", "0.47774488", "0.47733834", "0.47733834", "0.4768829", "0.4761163", "0.47575128", "0.47479448", "0.47413254", "0.47402957", "0.47397763", "0.4736554", "0.47346887", "0.47166753", "0.47163424", "0.47153756", "0.47079447", "0.46988058", "0.4694615", "0.46920463", "0.46917126" ]
0.64300054
0
Convenience function that checks the b64str runestring is valid, derives from our secret, and passes against these values. If you want to check many runes, it's more efficient to create the MasterRune first and then check them, but this is fine if you're only checking one.
def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]: return MasterRune(secret).check_with_reason(b64str, values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode invalid\"\n return rune.are_restrictions_met(values)", "def check(secret: bytes, b64str: str, values: Dict[str, Any]) -> bool:\n return check_with_reason(secret, b64str, values)[0]", "def is_seed_valid(seed):\n if seed == \"0\":\n return True\n\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def is_rune_authorized(self, other: Rune) -> bool:\n # Make copy, as we're going to update state.\n sha = self.shabase.copy()\n totlen = self.seclen\n for r in other.restrictions:\n pad = end_shastream(totlen)\n sha.update(pad)\n totlen += len(pad)\n enc = bytes(r.encode(), encoding='utf8')\n sha.update(enc)\n totlen += len(enc)\n\n return other.authcode() == sha.digest()", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def validate_admin (admin_secret):\n\n try:\n admin_secret = admin_secret.encode()\n hashed = app.config['ADMIN_SECRET'].encode()\n return bcrypt.checkpw(admin_secret, hashed)\n\n except Exception as e:\n return False", "def is_right_secret(self, secret):\n dct = []\n for digit in secret:\n if digit in dct:\n return 0\n else:\n dct.append(digit)\n return True", "def validate_db_admin (db_secret):\n\n try:\n db_secret = db_secret.encode()\n hashed = app.config['DB_SECRET'].encode()\n return bcrypt.checkpw(db_secret, hashed)\n except Exception as e:\n return False", "def test_dna_validator(self):\n \n dna = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test invalid characters\n invalid_dna1 = 'EETGGAGACGGAAACASTCCGAGGACATCCGGAGGAACCCGGGGAGTZVTHHCTGAGTGGTAAT'\n # test invalid length\n invalid_dna2 = 'GGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test for invalid internal stop\n invalid_dna3 = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTTGAGTGGTAATC'\n expected_validationT = True\n expected_validationF = False\n result_validation1 = dna_validator(dna)\n self.assertEqual(result_validation1, expected_validationT)\n result_validation2 = dna_validator(invalid_dna1)\n self.assertEqual(result_validation2, expected_validationF)\n result_validation3 = dna_validator(invalid_dna2)\n self.assertEqual(result_validation3, expected_validationF)\n result_validation4 = dna_validator(invalid_dna3)\n self.assertEqual(result_validation4, expected_validationF)", "def mnemonic_is_valid(mnemonic: str, wordlist=WORDLIST):\n try:\n mnemonic_to_bytes(mnemonic, wordlist=wordlist)\n return True\n except Exception as e:\n return False", "def ValidateEntry(Secret_Word_Masked, Secret_Word_Masked_Unspaced, Used_Char):\n Guess = input(\"\\nGuess A Letter: \")\n Guess = Guess.lower()\n\n while (len(Guess) > 1 or not Guess.isalpha()):\n print(\"\\nInvalid Entry: \\'%s\\'\" 
%Guess)\n print(\"Alphabetic Character(s) Already Used: %s\" %Used_Char)\n print(\"So Far The Secret Word is:\\n%s\" %Secret_Word_Masked)\n Guess = input(\"\\nPlease Enter Only A Single Alphabetic Character: \")\n Guess = Guess.lower()\n\n print(\"\\nValid Entry: \\'%c\\'\" %Guess)\n\n return Guess", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def test_accept_letter_unmasked_masked(self):\n # Prepare test\n letter_a = 'a'\n letter_b = 'b'\n self.console.in_valid_letter = MagicMock(return_value=letter_b)\n self.console.word.is_masked.side_effect = [False, True, StopIteration]\n\n # Run test\n result = self.console.accept_letter(letter_a)\n\n # Evaluate test\n self.assertEqual(letter_b, result)", "def test_accept_letter_masked(self):\n # Prepare test\n letter = 'a'\n self.console.in_valid_letter = MagicMock(return_value='b')\n self.console.word.is_masked.return_value = True\n\n # Run test\n result = self.console.accept_letter(letter)\n\n # Evaluate test\n self.assertEqual(letter, result)", "def is_secret_string(value):\n if not isinstance(value, basestring):\n return False\n return bool(_secret_string_pattern.match(value))", "def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))", "def check_redditor(self, args):\n\n for user in args.redditor:\n if any(char.isalpha() for char in user[1]) \\\n or self._illegal_chars.search(user[1]) != None \\\n or int(user[1]) == 0:\n raise ValueError", "def test_secrets_add_wrong_format(secret):\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n message = 'For literal strings use \"SECRET_NAME=VALUE\" format'\n\n result = runner.invoke(cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", secret])\n assert result.exit_code == 1\n assert message in result.output", "def verify_pwd_str(provided_password: str, stored_hash: str) -> bool:\n salt = stored_hash[:64].encode('ascii')\n stored_password = stored_hash[64:]\n provided_password = provided_password.encode('utf-8')\n pwdhash = hashlib.pbkdf2_hmac('sha256', provided_password, salt, 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def test_different_seeds(self):\n\n test_string = \"just a string\"\n\n funcs = [\n CityHash64WithSeed,\n CityHash64WithSeeds,\n CityHash128WithSeed,\n ]\n\n for func in funcs:\n self.assertNotEqual(func(test_string, 0), func(test_string, 1))", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def isValid(text):\n return bool(re.search(r'\\R2D2\\b', text, re.IGNORECASE))", "def self_check() -> None:\n assert len(ZBASE32_ALPHABET) == 32\n\n # Test vector from https://github.com/matusf/z-base-32/blob/0.1.2/src/lib.rs\n assert zbase32_encode(b\"asdasd\") == \"cf3seamuco\"\n assert zbase32_decode(\"cf3seamuco\") == b\"asdasd\"\n\n # Test vector from https://www.uriports.com/blog/setting-up-openpgp-web-key-directory/\n # assert zbase32_encode(hashlib.sha1(b\"yourmail\").digest()) == 
\"hacabazoakmnagxwmkjerb9yehuwehbm\"\n # -> this hash is wrong, and I don't know what username gives the SHA1\n # e61980e2f0c2962c19f45a928207e0472744702b\n\n # Test vector from https://metacode.biz/openpgp/web-key-directory\n assert zbase32_encode(hashlib.sha1(b\"test-wkd\").digest()) == \"4hg7tescnttreaouu4z1izeuuyibwww1\"\n\n # Test vector from https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/\n assert (\n get_wkd_advanced_url(\"[email protected]\")\n == \"https://openpgpkey.example.org/.well-known/openpgpkey/example.org/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe\" # noqa\n )\n assert (\n get_wkd_direct_url(\"[email protected]\")\n == \"https://example.org/.well-known/openpgpkey/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe\"\n )\n\n # Test vector from https://wiki.gnupg.org/WKD\n assert (\n get_wkd_direct_url(\"[email protected]\")\n == \"https://intevation.de/.well-known/openpgpkey/hu/it5sewh54rxz33fwmr8u6dy4bbz8itz4?l=bernhard.reiter\"\n )", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def test_correct_barcode(self):\r\n original = 'ATTTTTTTTTCG'\r\n recieved = 'ATTTTTTTTTTT'\r\n possibilities = ['TGTATTCGTGTA', 'ATTTTTTTTTCG', 'TGTAGGCGTGTA',\r\n 'TGTAGAAGTGTA', 'TGTAGGCGTATA', 'TGTAAAAAAAAA']\r\n decoded, num_errors = barcode.correct_barcode(recieved, possibilities)\r\n self.assertEqual(decoded, original)\r\n self.assertEqual(num_errors, 2)", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)", "def is_valid(passwd: str) -> bool:\n return (\n re.search(r'abc|bcd|cde|def|efg|fgh|ghi|hij|jkl|klm|lmn|mno|nop|opq|pqr|qrs|rst|stu|tuv|uvw|vwx|wxy|xyz', passwd) is not None and\n all([c not in passwd for c in 'iol']) and\n re.search(r'([a-z])\\1.*([a-z])\\2', passwd) is not None\n )", "def string_or_b64kms(value):\n if not value:\n return value\n\n try:\n # Check if environment value base64 encoded\n if base64_pattern.match(value):\n # If yes, decode it using AWS KMS\n data = base64.b64decode(value)\n decrypted_value = decrypt_kms_data(data)\n\n # If decryption succeed, use it\n if decrypted_value:\n value = decrypted_value\n except Exception as e:\n logging.exception(e)\n return value", "def check_raw_string(self, string, is_bstring=True):\n self.log(u\"Checking the given byte string\")\n self.result = ValidatorResult()\n if self._are_safety_checks_disabled(u\"check_raw_string\"):\n return self.result\n if is_bstring:\n self._check_utf8_encoding(string)\n if not self.result.passed:\n return self.result\n string = gf.safe_unicode(string)\n self._check_not_empty(string)\n if not self.result.passed:\n return self.result\n self._check_reserved_characters(string)\n return self.result", "def is_base64(string):\n return (not re.match('^[0-9]+$', string)) and \\\n (len(string) % 4 == 0) and \\\n re.match('^[A-Za-z0-9+/]+[=]{0,2}$', string)", "def test_string(self, s):\n\n data = s.split(' ')\n\n origin = ' 
'.join(data[0:-1])\n if not origin:\n return False\n \n origin_hashed = self.hash_with_salt(origin)\n\n return origin_hashed == s", "def generate_password():\n selection = string.ascii_letters + string.digits\n\n while True:\n password = \"\".join(secrets.choice(selection) for i in range(16))\n\n if (\n any(c.isupper() for c in password)\n and any(c.islower() for c in password)\n and any(c.isdigit() for c in password)\n ):\n break\n\n return password", "def test_allowed_string(self):\n val = DwcaValidator(yaml.load(self.yaml_allowed_string, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': 'female'}\n self.assertFalse(val.validate(document))", "def test_good_values_for_validate_guid(good_value):\n bcvalidators.validate_guid(good_value)", "def test_unicode_2_64(self):\n test_case = u\"\\u2661\" # pylint: disable=redundant-u-string-prefix\n self.assertTrue(isinstance(CityHash64WithSeed(test_case), long))", "def is_valid_password(variable):\n if re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', variable):\n return True\n return False", "def check(self, plain_string, hashed_string, options={}, driver=None):\n return (\n self.get_driver(driver)\n .set_options(options or self.get_config_options(driver))\n .check(plain_string, hashed_string)\n )", "def is_valid_input(letter_guessed):\n\n #calulate the lengh of the letters\n NumberOfCharacters = (len(letter_guessed))\n #convert input letters to Underscore\n NumberOfUnderscore = (NumberOfCharacters * \"_\")\n\n\n # All the letters in English\n EnglishLetter = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMOPQRSTUVWXYZ\"\n\n\n if NumberOfCharacters > 1:\n print(\"false\")\n\n # If the user entered English character the string will print the character a non-English character (for example, a sign such as: &, *), the string will print \"E2\n elif letter_guessed in EnglishLetter:\n print(\"true\")\n else:\n print(\"false\")", "def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def main():\n password = get_password(MINIMUM_LENGTH)\n convert_to_asterisks(password)", "def test_on_anagram(self):\n\n test_string = \"anagram\"\n test_anagram = \"gramana\"\n actual = is_anagram(test_string, test_anagram)\n assert actual == True", "def test_shorter_valid_string_is_contained(tricky_trie):\n assert tricky_trie.contains('bbb')", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def test_encodes_short_string(self):\n result = encode_run_length(\"AAABBBRYUIWW\")\n self.assertEqual(result, \"3A3B1R1Y1U1I2W\")", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def __allowed_values_correct_string(self):\n strTestName = 'Values of a string (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'string')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramAllowed('parameter1', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 'Allowed string #2'\n\n self.__parametersCheck_error(RxCSObject, 'correct', 
strTestName)", "def get_secret_word():\n pass", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def verifyHashcode(digest):\n list_str = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n list_num = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ]\n \n total = 0\n for i2 in range(len(digest)):\n digest_i = digest[i2]\n #print(\"digest_i =\", digest_i)\n \n for i1 in range(16):\n if digest_i == list_str[i1] and i2 != 0:\n total += list_num[i1]\n #print(\"total =\", total)\n #print(\"list_num[i1] =\", list_num[i1])\n continue\n \n #print(\"--- --- ---\")\n \n #print(\"total =\", total)\n \n checknum = total % 16\n #print(\"checknum =\", checknum)\n \n checkstr = list_str[checknum]\n #print(\"checkstr =\", checkstr)\n \n checkorg = digest[0]\n #print(\"checkorg =\", checkorg)\n \n if checkorg == checkstr:\n isValid = True\n else:\n isValid = False\n \n return isValid", "def test_user1_method3():\n REGEX_MATCH_BCRYPT_HASH = r\"^\\$2[ayb]\\$.{56}$\"\n hashed_password = u.password.decode()\n assert re.match(REGEX_MATCH_BCRYPT_HASH, hashed_password), \"Password was not hashed correctly\"", "def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))", "def test_consistent_encoding_64(self):\n text = u\"abracadabra\" # pylint: disable=redundant-u-string-prefix\n self.assertEqual(\n CityHash64WithSeed(text), CityHash64WithSeed(text.encode(\"utf-8\"))\n )", "def is_id(string):\n regex = re.compile('[0-9a-f]{32}\\Z', re.I)\n if bool(regex.match(string)):\n return True\n\n return False", "def test_handles_one_char(self):\n result = encode_run_length(\"R\")\n self.assertEqual(result, \"1R\")", "def verify_password(stored_passwd, provided_passwd):\n salt = stored_passwd[:64]\n stored_password = stored_passwd[64:]\n pwdhash = hashlib.pbkdf2_hmac(\n 'sha512', provided_passwd.encode('utf-8'), salt.encode('ascii'), 100000\n )\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def verify_password(entered_password):\n return PASSWORD_RE.match(entered_password)", "def _valid_(s) :\n return LoKi.Dicts.MCFinderDicts.valid (s)", "def _validate_bbg_id(x):\n return len(x) == 12 and x[:3] == 'BBG' and str.isalnum(x[3:11]) and sum(map(\n lambda u: u in ['A', 'E', 'I', 'O', 'U'], x[3:11])) == 0 and str.isdigit(x[11])", "def validate_puzzle_string(self):\n is_puzzle_string_valid = False\n while is_puzzle_string_valid is False:\n question = \"Enter a valid puzzle. (81 inline digits where zeros \" +\\\n \"represent empty spots) E.g. 01040506.... 
and so on\\npuzzle\"\n puzzle_parameter = self.ask_user_input(question)\n if not puzzle_parameter.isdigit():\n print(\"The puzzle should contain only digits, please try again\")\n elif len(puzzle_parameter) == 81:\n is_puzzle_string_valid = True\n self.current_response = puzzle_parameter\n else:\n print(\"The puzzle should contain exactly 81 digits, please try again\")\n return is_puzzle_string_valid", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def is_valid(self):\n if len(self) <= 64 and re.match(RE_VALID_UID, self):\n return True\n\n return False", "def test_mask_secret_null():\n assert utils.mask_secrets(\"\", None) == \"\"", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def validate_safe_string(value):\n # The following strings are explicitly allowed, despite having otherwise-illegal chars.\n legal_strings_with_special_chars = frozenset({'@rid', '@class', '@this', '%'})\n\n if not isinstance(value, six.string_types):\n raise TypeError(u'Expected string value, got: {} {}'.format(\n type(value).__name__, value))\n\n if not value:\n raise GraphQLCompilationError(u'Empty strings are not allowed!')\n\n if value[0] in string.digits:\n raise GraphQLCompilationError(u'String values cannot start with a digit: {}'.format(value))\n\n if not set(value).issubset(VARIABLE_ALLOWED_CHARS) and \\\n value not in legal_strings_with_special_chars:\n raise GraphQLCompilationError(u'Encountered illegal characters in string: {}'.format(value))", "def test_kms_re_encrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, self.secret)", "def check_word(word):\r\n if word in word_master:\r\n valid = True\r\n else:\r\n valid = False\r\n return valid", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return constant_time_compare(hsh, get_hexdigest(algo, salt, raw_password))", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def check_for_validity_puzzle_1(limits: tuple, rep_char: str, password: str):\n\n reps = password.count(rep_char)\n\n lower, upper = limits\n\n if lower <= reps <= upper:\n return True\n else:\n return False", "def __init__(self, targetChar, guessChar, leftChar=None, rightChar=None):\r\n self.guessChar = guessChar\r\n self.result = self.validateCharacter(targetChar, guessChar, leftChar=leftChar, rightChar=rightChar)", "def needs_recoding(strings):\n for string in strings:\n for char in string:\n if 127 < ord(char) < 256:\n return True\n return False", "def test_string_unicode_64(self):\n self.assertEqual(\n CityHash64WithSeed(EMPTY_STRING), CityHash64WithSeed(EMPTY_UNICODE)\n )", "def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)", "def create_secret_code():\n characters = string.ascii_uppercase + string.digits\n size = 6\n return ''.join(random.choice(characters) for _ in range(size))", "def 
test_validate_input_valid(self):\n final_config = self.dtm1.validate_input('00001111')\n nose.assert_equal(final_config[0], 'q4')\n nose.assert_equal(str(final_config[1]), 'TMTape(\\'xxxxyyyy.\\')')", "def properDayInput(day):\r\n possibleStrings = [\"m\",\"mon\",\"monday\",\"tu\",\"tue\",\"tues\",\"tuesday\",\"w\",\r\n \"we\",\"wed\",\"wednesday\",\"th\",\"tr\",\"r\", \"thu\",\"thur\",\"thurs\",\"thursday\",\"f\",\"fr\",\r\n \"fri\",\"friday\",\"sa\",\"sat\",\"saturday\",\"su\",\"sun\",\"sunday\"]\r\n \r\n validString = False\r\n for i in range(0, len(possibleStrings)):\r\n if possibleStrings[i] == day.lower():\r\n validString = True\r\n return validString", "def test_random_code_generator(self):\n # Produces similar to '8FHGNH'\n code = random_code_generator()\n self.assertEquals(len(code), 6)\n code_2 = random_code_generator()\n if code == code_2:\n self.assertEquals(False)\n # Produces similar to 'CFB-U8X-9KE-TY8':\n code_3 = random_code_generator(12, 4, '-')\n self.assertEquals(len(code_3), 15)\n self.assertEquals(len(code_3.replace('-', '')), 12)\n code_4 = random_code_generator(100, banned_chars='X')\n self.assertEquals(code_4.find('X'), -1)", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def test06_password_mixture(self):\n self.set_complexity(length=14, numeric=1, upper=1, lower=1, special=1)\n\n invalid = (\n \"A\",\n \"ACBDEabcde!!!!\",\n \"Tr0ub4dor&3\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n \"1234;.,/]1234\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"Sixteenchars12@_\",\n \"thisis4reallybadPassword!\",\n \"C0rrecthorsebatteryst@ple\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'Password1234\\'\"\"\"\"\"',\n )\n self.set_passwords(valid)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_check_dna_chars_primers(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 's1&data'],\r\n ['s2', 'CGTA', 'AAA1A', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_primers(header, mapping_data, errors)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: AAA1A\\t2,2']\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should be able to suppress LinkerPrimerSequence check, won't\r\n # suppress ReversePrimer check\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'ReversePrimer', 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 'ACGT', 's1&data'],\r\n ['s2', 'CGTA', 'AAA1A', 'ACGTF', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_primers(header, mapping_data, errors,\r\n disable_primer_check=True)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: ACGTF\\t2,3']\r\n\r\n self.assertEqual(errors, expected_errors)", "def Check_is_valid(self, String):\r\n\r\n if self.Special_Names.__contains__(String):\r\n return False\r\n elif self.Special_Names_no_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_one_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_two_Operands.__contains__(String):\r\n return False\r\n elif 
self.Data_types.__contains__(String):\r\n return False\r\n elif self.Registers.__contains__(String):\r\n return False\r\n elif self.Irvine32_functions.__contains__(String):\r\n return False\r\n elif String.__contains__('\"'):\r\n return False\r\n elif String.__contains__('\\''):\r\n return False\r\n elif String.__contains__('.'):\r\n return False\r\n elif String[0].isdecimal():\r\n return False\r\n if len(self.Data_variables) > 0:\r\n if self.Data_variables.__contains__(String):\r\n return False\r\n if len(self.Functions_names) > 0:\r\n if self.Functions_names.__contains__(String):\r\n return False\r\n if len(self.Labels_names) > 0:\r\n if self.Labels_names.__contains__(String):\r\n return False\r\n return True", "def testWord(self, word):\n return self.crackHash(self.hashWord(word))", "def check_string( pname, use ):\n for l in pname:\n if l in string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True", "def match(self,pwdmarked,password):\n pwd1 = self.cleanPassword(pwdmarked)\n pwd2 = self.cleanPassword(password)\n if not (pwdmarked or '').startswith('plain:{'):\n pwd2 = crypt(password,self.settings.authenSalt,10000)\n return pwd1==pwd2", "def test_mask_secret_simple_positive():\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"ls -lh /tmp/8bca8d2e /tmp/683c08d7-bc07 /1cd6-4ec0-8e55\"\n cmd_masked_expected = \"ls -lh /tmp/8bca8d2e /tmp/***** /1cd6-4ec0-8e55\"\n assert utils.mask_secrets(cmd, secrets) == cmd_masked_expected", "def is_valid_password_v1(password):\n letter_count = sum([x == password[\"letter\"] for x in list(password[\"password\"])])\n return password[\"low\"] <= letter_count <= password[\"high\"]", "def decode_match(s):\n if '=' in s:\n raise TypeError\n while True:\n try:\n bin = standard_b64decode(s)\n except TypeError, e:\n if str(e) != 'Incorrect padding':\n raise\n s += '='\n else:\n break\n return MatchProxy(bin)", "def is_any_valid_docker_container_id(string: str) -> bool:\n return re.fullmatch(\"^[0-9a-f]{64}$\", string) is not None", "def test_is_valid_hex(self):\n self.assertTrue(is_valid_hex('#aabb11'))\n self.assertTrue(is_valid_hex('#000'))\n self.assertTrue(is_valid_hex('#aaa'))\n self.assertFalse(is_valid_hex('black'))\n self.assertFalse(is_valid_hex('bl(ack'))", "def isValid(t_id):\n\tstr_id=str(t_id).strip()\n\treturn str_id.isdigit()" ]
[ "0.67954326", "0.6076462", "0.56105393", "0.55704355", "0.5488138", "0.54298264", "0.5401579", "0.5401579", "0.5336335", "0.5291665", "0.5288441", "0.5265383", "0.522915", "0.5190011", "0.51790816", "0.51719004", "0.51499397", "0.51450264", "0.51145905", "0.50659394", "0.50631905", "0.5052787", "0.50197434", "0.5016467", "0.5003911", "0.49980125", "0.4967239", "0.49660823", "0.49484655", "0.4921297", "0.49130854", "0.4888071", "0.48846585", "0.48754397", "0.48729917", "0.48722622", "0.48695695", "0.48459047", "0.48378718", "0.48324043", "0.48188093", "0.48159745", "0.48066616", "0.4805148", "0.48025838", "0.4802113", "0.48003054", "0.47986838", "0.47928774", "0.47914156", "0.4787178", "0.47871056", "0.47849202", "0.4778179", "0.47723863", "0.47723863", "0.47699603", "0.47627926", "0.47600046", "0.47595456", "0.47503263", "0.47456667", "0.47420347", "0.47387454", "0.4737841", "0.47374114", "0.47355348", "0.47326872", "0.47324762", "0.47324184", "0.4730238", "0.47254702", "0.47239798", "0.47231638", "0.47135907", "0.47111318", "0.47085974", "0.4706531", "0.47064838", "0.47034115", "0.47023395", "0.46927762", "0.46893865", "0.46839678", "0.46743685", "0.46710986", "0.46695697", "0.46663645", "0.46605307", "0.46582407", "0.4657097", "0.46525714", "0.46488807", "0.4646269", "0.46443152", "0.46432698", "0.46415788", "0.4640999", "0.46401072", "0.46383333" ]
0.7233146
0
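A minimal usage sketch for the record above (illustrative only, not part of the dataset): it assumes the functions shown in the preceding query/document pair are exposed from a module named `runes`, and the restriction-values dict is hypothetical.

    import runes

    secret = bytes(16)                # the issuing secret
    runestring = "..."                # placeholder: a base64 rune presented by a client
    values = {'command': 'getinfo'}   # hypothetical fields the restrictions are checked against

    # One-off check: the convenience wrappers build a MasterRune internally.
    ok, reason = runes.check_with_reason(secret, runestring, values)
    ok = runes.check(secret, runestring, values)

    # Checking many runes: create the MasterRune once and reuse it, as the
    # docstring above recommends.
    incoming_runestrings = [runestring]   # e.g. collected from incoming requests
    master = runes.MasterRune(secret)
    for rs in incoming_runestrings:
        ok, reason = master.check_with_reason(rs, values)

With the placeholder runestring, both checks simply return False with the reason "runestring invalid", matching the behaviour of the check_with_reason implementation quoted in the record.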
Convenience function that checks the b64str runestring is valid, derives from our secret, and passes against these values. If you want to check many runes, it's more efficient to create the MasterRune first and then check them, but this is fine if you're only checking one. Unlike check_with_reason(), this discards the reason and returns a simple True or False.
def check(secret: bytes, b64str: str, values: Dict[str, Any]) -> bool: return check_with_reason(secret, b64str, values)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n return MasterRune(secret).check_with_reason(b64str, values)", "def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode invalid\"\n return rune.are_restrictions_met(values)", "def is_seed_valid(seed):\n if seed == \"0\":\n return True\n\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def mnemonic_is_valid(mnemonic: str, wordlist=WORDLIST):\n try:\n mnemonic_to_bytes(mnemonic, wordlist=wordlist)\n return True\n except Exception as e:\n return False", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_rune_authorized(self, other: Rune) -> bool:\n # Make copy, as we're going to update state.\n sha = self.shabase.copy()\n totlen = self.seclen\n for r in other.restrictions:\n pad = end_shastream(totlen)\n sha.update(pad)\n totlen += len(pad)\n enc = bytes(r.encode(), encoding='utf8')\n sha.update(enc)\n totlen += len(enc)\n\n return other.authcode() == sha.digest()", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def check_redditor(self, args):\n\n for user in args.redditor:\n if any(char.isalpha() for char in user[1]) \\\n or self._illegal_chars.search(user[1]) != None \\\n or int(user[1]) == 0:\n raise ValueError", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def validate_admin (admin_secret):\n\n try:\n admin_secret = admin_secret.encode()\n hashed = app.config['ADMIN_SECRET'].encode()\n return bcrypt.checkpw(admin_secret, hashed)\n\n except Exception as e:\n return False", "def is_secret_string(value):\n if not isinstance(value, basestring):\n return False\n return bool(_secret_string_pattern.match(value))", "def validate_db_admin (db_secret):\n\n try:\n db_secret = db_secret.encode()\n hashed = app.config['DB_SECRET'].encode()\n return bcrypt.checkpw(db_secret, hashed)\n except Exception as e:\n return False", "def check_correct_password(status, pwd):\n # generate key from raw password\n key = generate_key_from_password(\n pwd, salt=status.get(\"salt\")\n )\n f = Fernet(key)\n try:\n decrypt_output = f.decrypt(status[\"encrypted_check_phrase\"])\n except InvalidToken:\n return False\n return decrypt_output.decode(\"utf-8\") != settings.CHECK_PHRASE", "def isValid(text):\n return bool(re.search(r'\\R2D2\\b', text, re.IGNORECASE))", "def check_raw_string(self, string, is_bstring=True):\n self.log(u\"Checking the given byte string\")\n self.result = ValidatorResult()\n if self._are_safety_checks_disabled(u\"check_raw_string\"):\n return self.result\n if is_bstring:\n self._check_utf8_encoding(string)\n if not self.result.passed:\n return self.result\n string = gf.safe_unicode(string)\n self._check_not_empty(string)\n if not self.result.passed:\n return self.result\n self._check_reserved_characters(string)\n return self.result", "def test_accept_letter_unmasked_masked(self):\n # Prepare test\n letter_a = 'a'\n letter_b = 'b'\n self.console.in_valid_letter = MagicMock(return_value=letter_b)\n 
self.console.word.is_masked.side_effect = [False, True, StopIteration]\n\n # Run test\n result = self.console.accept_letter(letter_a)\n\n # Evaluate test\n self.assertEqual(letter_b, result)", "def isValid(ssToCheck):\n\n # Validate ssToCheck argument.\n if not isinstance(ssToCheck, str) or len(ssToCheck) < 2:\n raise ShortStrException('ssToCheck argument must be a string at least two characters long')\n\n if RUNNING_PY_2:\n checksum = zlib.adler32(ssToCheck[:-1].decode('utf-8'))\n else:\n checksum = zlib.adler32(bytes(ssToCheck[:-1], encoding='utf-8'))\n\n return ssToCheck[-1] == GLYPHS[checksum % LEN_GLYPHS] # Make sure last character in ssToCheck is the correct checksum character.", "def is_valid(passwd: str) -> bool:\n return (\n re.search(r'abc|bcd|cde|def|efg|fgh|ghi|hij|jkl|klm|lmn|mno|nop|opq|pqr|qrs|rst|stu|tuv|uvw|vwx|wxy|xyz', passwd) is not None and\n all([c not in passwd for c in 'iol']) and\n re.search(r'([a-z])\\1.*([a-z])\\2', passwd) is not None\n )", "def is_right_secret(self, secret):\n dct = []\n for digit in secret:\n if digit in dct:\n return 0\n else:\n dct.append(digit)\n return True", "def test_dna_validator(self):\n \n dna = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test invalid characters\n invalid_dna1 = 'EETGGAGACGGAAACASTCCGAGGACATCCGGAGGAACCCGGGGAGTZVTHHCTGAGTGGTAAT'\n # test invalid length\n invalid_dna2 = 'GGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test for invalid internal stop\n invalid_dna3 = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTTGAGTGGTAATC'\n expected_validationT = True\n expected_validationF = False\n result_validation1 = dna_validator(dna)\n self.assertEqual(result_validation1, expected_validationT)\n result_validation2 = dna_validator(invalid_dna1)\n self.assertEqual(result_validation2, expected_validationF)\n result_validation3 = dna_validator(invalid_dna2)\n self.assertEqual(result_validation3, expected_validationF)\n result_validation4 = dna_validator(invalid_dna3)\n self.assertEqual(result_validation4, expected_validationF)", "def Check_is_valid(self, String):\r\n\r\n if self.Special_Names.__contains__(String):\r\n return False\r\n elif self.Special_Names_no_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_one_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_two_Operands.__contains__(String):\r\n return False\r\n elif self.Data_types.__contains__(String):\r\n return False\r\n elif self.Registers.__contains__(String):\r\n return False\r\n elif self.Irvine32_functions.__contains__(String):\r\n return False\r\n elif String.__contains__('\"'):\r\n return False\r\n elif String.__contains__('\\''):\r\n return False\r\n elif String.__contains__('.'):\r\n return False\r\n elif String[0].isdecimal():\r\n return False\r\n if len(self.Data_variables) > 0:\r\n if self.Data_variables.__contains__(String):\r\n return False\r\n if len(self.Functions_names) > 0:\r\n if self.Functions_names.__contains__(String):\r\n return False\r\n if len(self.Labels_names) > 0:\r\n if self.Labels_names.__contains__(String):\r\n return False\r\n return True", "def test_accept_letter_masked(self):\n # Prepare test\n letter = 'a'\n self.console.in_valid_letter = MagicMock(return_value='b')\n self.console.word.is_masked.return_value = True\n\n # Run test\n result = self.console.accept_letter(letter)\n\n # Evaluate test\n self.assertEqual(letter, result)", "def check_word(word):\r\n if word in word_master:\r\n valid = True\r\n else:\r\n valid = False\r\n return valid", "def 
check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def ValidateEntry(Secret_Word_Masked, Secret_Word_Masked_Unspaced, Used_Char):\n Guess = input(\"\\nGuess A Letter: \")\n Guess = Guess.lower()\n\n while (len(Guess) > 1 or not Guess.isalpha()):\n print(\"\\nInvalid Entry: \\'%s\\'\" %Guess)\n print(\"Alphabetic Character(s) Already Used: %s\" %Used_Char)\n print(\"So Far The Secret Word is:\\n%s\" %Secret_Word_Masked)\n Guess = input(\"\\nPlease Enter Only A Single Alphabetic Character: \")\n Guess = Guess.lower()\n\n print(\"\\nValid Entry: \\'%c\\'\" %Guess)\n\n return Guess", "def verify_revealed_secret(self, revealed_secret: Scalar):\n r = self.prev_round_with_same_leader\n if r:\n dataset = self.datasets[r]\n if dataset:\n proof = dataset.proof\n else:\n proof = NODE_INFOS[self.leader].initial_proof\n\n return proof is not None and pvss.verify_secret(revealed_secret, proof.commitments, T)", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def win_condition(self):\n if self.letters_wrong < 5:\n if '__ ' in self.new_string:\n return False\n else:\n return True\n else:\n return True", "def verify_pwd_str(provided_password: str, stored_hash: str) -> bool:\n salt = stored_hash[:64].encode('ascii')\n stored_password = stored_hash[64:]\n provided_password = provided_password.encode('utf-8')\n pwdhash = hashlib.pbkdf2_hmac('sha256', provided_password, salt, 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def verify_secret(prop_name, value):\n\n hashed = hashlib.sha256(value.encode('UTF-8')).hexdigest()\n has_must_be = RUN_CONFIG.get(prop_name)\n\n return hashed == has_must_be", "def pw_is_viable(password: str) -> bool:\n logging.debug(\"called\")\n if not any([\n not password,\n len(password) < 8,\n not any(map(lambda x: x.isdigit(), password)),\n not any(map(lambda x: x.isupper(), password)),\n not any(map(lambda x: x.islower(), password)),\n not any(map(lambda x: x in SPECIAL_CHARACTERS, password)),\n ]):\n return True\n else:\n raise PasswordError(\"Password should contain at least a digit, an uppercase, a lower case, and special \"\n \"characters and should be at least 8 digits in total.\", password)", "def validate_strand(strand: str) -> bool:\n strand = strand.upper()\n count = dict(Counter(strand))\n for k in count.keys():\n if k not in NUCLEOTIDES:\n raise Exception(\"Invalid DNA sequence\")\n return True", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def is_valid_client_secret(client_secret: str) -> 
bool:\n return (\n 0 < len(client_secret) <= 255\n and CLIENT_SECRET_REGEX.match(client_secret) is not None\n )", "def check(self, plain_string, hashed_string, options={}, driver=None):\n return (\n self.get_driver(driver)\n .set_options(options or self.get_config_options(driver))\n .check(plain_string, hashed_string)\n )", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def check_win(secret_word, old_letters_guessed):\r\n if show_hidden_word(secret_word, old_letters_guessed) == secret_word:\r\n return True\r\n else:\r\n return False", "def verify_password(entered_password):\n return PASSWORD_RE.match(entered_password)", "def validate(self, word):\n\n return self.valid_word(word)", "def validate(self, word):\n\n return self.valid_word(word)", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def validate_puzzle_string(self):\n is_puzzle_string_valid = False\n while is_puzzle_string_valid is False:\n question = \"Enter a valid puzzle. (81 inline digits where zeros \" +\\\n \"represent empty spots) E.g. 01040506.... and so on\\npuzzle\"\n puzzle_parameter = self.ask_user_input(question)\n if not puzzle_parameter.isdigit():\n print(\"The puzzle should contain only digits, please try again\")\n elif len(puzzle_parameter) == 81:\n is_puzzle_string_valid = True\n self.current_response = puzzle_parameter\n else:\n print(\"The puzzle should contain exactly 81 digits, please try again\")\n return is_puzzle_string_valid", "def checkValidString(self, string: str) -> bool:\n @lru_cache(None)\n def dp(index, diff):\n \"\"\"\n index is the index of string\n diff the counts of '(' - counts of ')'\n \"\"\"\n\n if index == len_s:\n return diff == 0\n if abs(diff) > len_s - index:\n return False\n c = string[index]\n index += 1\n if c == '(':\n return dp(index, diff + 1)\n elif c == ')':\n if diff - 1 < 0:\n return False\n return dp(index, diff - 1)\n else:\n return dp(index, diff + 1) or dp(index, diff - 1) or dp(index, diff)\n\n len_s = len(string)\n return dp(0, 0)", "def test_secrets_add_wrong_format(secret):\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n message = 'For literal strings use \"SECRET_NAME=VALUE\" format'\n\n result = runner.invoke(cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", secret])\n assert result.exit_code == 1\n assert message in result.output", "def test_good_values_for_validate_guid(good_value):\n bcvalidators.validate_guid(good_value)", "def is_valid_word(word, hand, word_list):\n failure=True\n word=word.lower()\n if word not in word_list:\n failure=False\n for i in word:\n w=hand.get(i,0)\n if w==0:\n failure=False\n break\n return failure", "def test_string(self, s):\n\n data = s.split(' ')\n\n origin = ' '.join(data[0:-1])\n if not origin:\n return False\n \n origin_hashed = self.hash_with_salt(origin)\n\n return origin_hashed == s", "def test_is_valid_hex(self):\n self.assertTrue(is_valid_hex('#aabb11'))\n self.assertTrue(is_valid_hex('#000'))\n self.assertTrue(is_valid_hex('#aaa'))\n self.assertFalse(is_valid_hex('black'))\n self.assertFalse(is_valid_hex('bl(ack'))", "def _validate_app_id(self, app_id):\n try:\n uuid_hex = UUID(app_id)\n regex = APP_SECRET_REGEX_LIST[0]\n m = regex.search(app_id)\n if not m:\n return False\n elif uuid_hex or m:\n return True\n 
except ValueError:\n return False", "def test_allowed_string(self):\n val = DwcaValidator(yaml.load(self.yaml_allowed_string, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': 'female'}\n self.assertFalse(val.validate(document))", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))", "def isValid(t_id):\n\tstr_id=str(t_id).strip()\n\treturn str_id.isdigit()", "def needs_recoding(strings):\n for string in strings:\n for char in string:\n if 127 < ord(char) < 256:\n return True\n return False", "def is_valid_input(letter_guessed):\n\n #calulate the lengh of the letters\n NumberOfCharacters = (len(letter_guessed))\n #convert input letters to Underscore\n NumberOfUnderscore = (NumberOfCharacters * \"_\")\n\n\n # All the letters in English\n EnglishLetter = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMOPQRSTUVWXYZ\"\n\n\n if NumberOfCharacters > 1:\n print(\"false\")\n\n # If the user entered English character the string will print the character a non-English character (for example, a sign such as: &, *), the string will print \"E2\n elif letter_guessed in EnglishLetter:\n print(\"true\")\n else:\n print(\"false\")", "def test_equality(cleartextpw, cryptedpw=\"\"):\n if not cryptedpw:\n return crypt.crypt(cleartextpw, '$6${}$'.format(salt(83)))\n else:\n if cryptedpw == 'x' or cryptedpw == '*':\n raise NotImplementedError(\n \"Sorry, currently no support for shadow passwords\")\n\n return crypt.crypt(cleartextpw, cryptedpw) == cryptedpw", "def is_valid(self, user_specific_config: Any, factor: str) -> bool:", "def check_win(secret_word, old_letters_guessed):\n for i in secret_word:\n if i not in old_letters_guessed:\n return False\n # all letters from secret_word are in old_letters_guessed - the player won\n return True", "def isValidWord(word, hand, wordList):\n status = []\n status2 = []\n\n # check if the input is valid\n if not word in wordList:\n return False\n \n for letter in word:\n \n ## commented out bc global var not available for the individual exercise\n # if not letter in SCRABBLE_LETTER_VALUES.keys():\n # status.append(False)\n \n if not letter in hand.keys():\n status.append(False)\n \n if False in status:\n return False\n \n # check if there are enough values in hand for the guess\n for key, value in getFrequencyDict(word).items():\n \n if not key in hand or value > hand[key]:\n status2.append(False)\n\n if False in status2:\n return False\n \n return True", "def validate(self, encrypted_token: str) -> bool:\n payload, timestamp_ms, crc = self.unsleeve(encrypted_token)\n ts_bytes = timestamp_ms.to_bytes(8, 'big')\n\n computed_crc = zlib.crc32(payload + ts_bytes)\n\n if crc == computed_crc:\n return in_range(timestamp_ms, deadline=self.token_life_ms)\n\n return False", "def test_correct_barcode(self):\r\n original = 'ATTTTTTTTTCG'\r\n recieved = 
'ATTTTTTTTTTT'\r\n possibilities = ['TGTATTCGTGTA', 'ATTTTTTTTTCG', 'TGTAGGCGTGTA',\r\n 'TGTAGAAGTGTA', 'TGTAGGCGTATA', 'TGTAAAAAAAAA']\r\n decoded, num_errors = barcode.correct_barcode(recieved, possibilities)\r\n self.assertEqual(decoded, original)\r\n self.assertEqual(num_errors, 2)", "def properDayInput(day):\r\n possibleStrings = [\"m\",\"mon\",\"monday\",\"tu\",\"tue\",\"tues\",\"tuesday\",\"w\",\r\n \"we\",\"wed\",\"wednesday\",\"th\",\"tr\",\"r\", \"thu\",\"thur\",\"thurs\",\"thursday\",\"f\",\"fr\",\r\n \"fri\",\"friday\",\"sa\",\"sat\",\"saturday\",\"su\",\"sun\",\"sunday\"]\r\n \r\n validString = False\r\n for i in range(0, len(possibleStrings)):\r\n if possibleStrings[i] == day.lower():\r\n validString = True\r\n return validString", "def is_valid_yubikey_format(otp: str) -> bool:\n\n return ALPHABET_RE.match(otp) and True or False", "def validate_ecl(ecl: str) -> bool:\n return ecl in [\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"]", "def validate_password(self, password):\n return self._password == encrypt_password(password,\n b64decode(str(self._salt)))", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def is_valid_flag(flag):\n return False if re.match(\"^FAUST_[A-Za-z0-9/\\\\+]{32}$\", flag) is None else True", "def check_string( pname, use ):\n for l in pname:\n if l in string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def isValidTest(self):\n if not self.hasError():\n return False\n distance = dameraulevenshtein(self.word, self.error) \n if(distance > 1):\n return False\n regex = '.*[^a-zA-Z].*'\n if re.match(regex, self.word) or re.match(regex, self.error):\n return False\n return True", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def is_valid_word(word_test, hand_word, word_list):\n count = 0\n word_test = list(word_test)\n for i_1 in word_test:\n if i_1 in hand_word:\n count += count\n if count == len(word_test) and word_test[i_1] == word_list[i_1]:\n return True\n return False", "def password_is_valid_task_2(row):\n # XOR the two positions in the password\n return (row['letter'] == row['password'][row['policy'][0] - 1]) != \\\n (row['letter'] == 
row['password'][row['policy'][1] - 1])", "def check_ascii_compliance(plaintext: bytes) -> bool:\n return all(c < 128 for c in plaintext)", "def avoids (word, frbdn_letters):\n for letter in frbdn_letters:\n if letter in word: \n return False\n return True", "def check_win(secret_word, old_letters_guessed):\n for letters_guessed in secret_word:\n if letters_guessed in old_letters_guessed:\n continue\n else:\n return False\n return True", "def exploit(correction_file, secret):\n # Try to login with the old determinist token generation\n token = hashlib.sha1(base64.b64encode(b\"random_user\")).hexdigest()\n out, _ = run_cmd(correction_file, ['random_user', '', '/get-token'])\n if token in out:\n print(\"I'm still able to craft a valid token!\")\n return True\n\n # Try to generate a bunch of tokens to see if the algorithm is deterministic\n # (in the real world a manual review of the algorithm is of course needed)\n outputs = set()\n for i in range(20):\n out, _ = run_cmd(correction_file, ['test', '', '/get-token'])\n outputs.add(out)\n if len(outputs) != i+1:\n print('The algorithm used to generate the token looks too deterministic\\n')\n return True\n\n return False", "def naughty_nice_check(string: str) -> bool:\n vowels = 0\n double_letters = 0\n forbidden_string = False\n last_letter = ''\n for i, letter in enumerate(string):\n vowels += 1 if letter in 'aeiou' else 0\n double_letters += 1 if letter == last_letter else 0\n if last_letter + letter in FORBIDDEN_STRINGS:\n forbidden_string = True\n last_letter = letter\n return vowels >= 3 and double_letters >= 1 and not forbidden_string", "def is_id(string):\n regex = re.compile('[0-9a-f]{32}\\Z', re.I)\n if bool(regex.match(string)):\n return True\n\n return False", "def isRNANucleotide(letter):\n if letter == 'A' or letter == 'C' or letter == 'G' or letter == 'U':\n return True\n return False", "def check_for_validity_puzzle_1(limits: tuple, rep_char: str, password: str):\n\n reps = password.count(rep_char)\n\n lower, upper = limits\n\n if lower <= reps <= upper:\n return True\n else:\n return False", "def is_valid(self):\n if len(self) <= 64 and re.match(RE_VALID_UID, self):\n return True\n\n return False", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return constant_time_compare(hsh, get_hexdigest(algo, salt, raw_password))", "def check_win(secret_word, old_letters_guessed):\n j=len(secret_word)\n for i in secret_word:\n j -= 1\n if i in old_letters_guessed:\n if j == 0:\n return True\n else: \n continue\n else:\n return False", "def is_any_valid_docker_container_id(string: str) -> bool:\n return re.fullmatch(\"^[0-9a-f]{64}$\", string) is not None", "def check(self, password):\n\n if len(password) < self.min_length:\n return False\n\n digits = len(findall(r\"\\d\", password))\n if digits < self.min_digits:\n return False\n\n special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)\n if special_chars < self.min_special:\n return False\n\n alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n\n upper_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_uppercase\n )\n if upper_chars < self.min_upper:\n return False\n\n lower_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_lowercase\n )\n if lower_chars < self.min_lower:\n return False\n\n if self.check_breaches and check_password(password):\n return False\n\n if self.func and not self.func(password):\n 
return False\n\n return True", "def test_shorter_valid_string_is_contained(tricky_trie):\n assert tricky_trie.contains('bbb')", "async def verify(self, ctx, *, verification_string: str):\r\n\r\n await ctx.message.delete()\r\n\r\n veriflogs_channel = ctx.guild.get_channel(config.veriflogs_chanid)\r\n verification_role = ctx.guild.get_role(config.read_rules_roleid)\r\n verification_wanted = config.verification_code\\\r\n .replace(\"[discrim]\", ctx.author.discriminator)\r\n\r\n # Do checks on if the user can even attempt to verify\r\n if ctx.channel.id != config.verification_chanid:\r\n resp = await ctx.send(\"This command can only be used \"\r\n f\"on <#{config.verification_chanid}>.\")\r\n await asyncio.sleep(config.sleep_secs)\r\n return await resp.delete()\r\n\r\n if verification_role in ctx.author.roles:\r\n resp = await ctx.send(\"This command can only by those without \"\r\n f\"<@&{config.read_rules_roleid}> role.\")\r\n await asyncio.sleep(config.sleep_secs)\r\n return await resp.delete()\r\n\r\n # Log verification attempt\r\n await self.bot.update_logs(\"Verification Attempt\",\r\n ctx.author.id,\r\n veriflogs_channel,\r\n log_text=verification_string,\r\n digdepth=50, result=-1)\r\n\r\n # Check verification code\r\n if verification_string.lower().strip() == verification_wanted:\r\n resp = await ctx.send(\"Success! Welcome to the \"\r\n f\"club, {str(ctx.author)}.\")\r\n await self.bot.update_logs(\"Verification Attempt\",\r\n ctx.author.id,\r\n veriflogs_channel,\r\n digdepth=50, result=0)\r\n await asyncio.sleep(config.sleep_secs)\r\n await ctx.author.add_roles(verification_role)\r\n await resp.delete()\r\n else:\r\n resp = await ctx.send(f\"Incorrect password, {str(ctx.author)}.\")\r\n await asyncio.sleep(config.sleep_secs)\r\n await resp.delete()", "def brepalgo_IsValid(*args):\n return _BRepAlgo.brepalgo_IsValid(*args)", "def check_dog_breed(dog):\n if not isinstance(dog.breed, str):\n raise NotStringError(\"Dog breed entered is not a string\")", "def verify_secret(secret: str, known_hash: bytes, known_salt: bytes) -> bool:\n unknown_bytes = secret.encode(SECRET_ENCODING)\n unknown_hash = hash_secret_raw(unknown_bytes, known_salt, **CRYPTO_PARAMS)\n\n return unknown_hash == known_hash", "def test_kms_re_encrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, self.secret)", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def check_valid_input(letter_guessed, old_letters_guessed):\r\n\tletter_guessed = letter_guessed.lower()\r\n\tif (len(letter_guessed) > 1):\r\n\t\treturn False\r\n\tif (not letter_guessed.isalpha()):\r\n\t\treturn False\r\n\tif (letter_guessed in old_letters_guessed):\r\n\t\treturn False\r\n\treturn True" ]
[ "0.8024349", "0.75399673", "0.5762778", "0.5715149", "0.56168044", "0.56168044", "0.543279", "0.54015696", "0.52972245", "0.5295794", "0.52827865", "0.5269401", "0.52549756", "0.5223828", "0.5214541", "0.5210419", "0.5160051", "0.5159024", "0.5144563", "0.5132561", "0.5131473", "0.51224166", "0.5117374", "0.51080954", "0.5095043", "0.5075796", "0.505644", "0.5049236", "0.5011334", "0.50078064", "0.49926955", "0.49478292", "0.49465385", "0.4922422", "0.49141335", "0.49086875", "0.49000806", "0.48869348", "0.4884666", "0.48832324", "0.48817644", "0.48816797", "0.4878731", "0.4867424", "0.4867424", "0.485571", "0.484546", "0.48398662", "0.48323953", "0.4828016", "0.4822569", "0.48121402", "0.48111287", "0.4806645", "0.48064315", "0.4799539", "0.4799539", "0.47995308", "0.47992533", "0.47851524", "0.47823462", "0.47762704", "0.47762603", "0.47697976", "0.47668326", "0.475716", "0.47503132", "0.47462925", "0.47443318", "0.47398698", "0.47237372", "0.47220436", "0.4721639", "0.47215837", "0.47194332", "0.47171575", "0.47058344", "0.4705294", "0.47048834", "0.4703636", "0.46984658", "0.46920943", "0.46844852", "0.46842253", "0.46838117", "0.46808487", "0.46779415", "0.467744", "0.46747315", "0.4674698", "0.4667768", "0.46635342", "0.4660513", "0.4658567", "0.4657593", "0.46575734", "0.46557257", "0.46539608", "0.465243", "0.46521226" ]
0.6641542
2
Determines whether fileToCheck is an appropriately formatted FASTA file. An appropriately formatted FASTA file is returned in correctFormatting.
def main(fileToCheck, minLength=-1, maxLength=-1):
    # Initialise variables.
    lineCount = 1           # The number of the line being examined. Used for displaying error messages.
    protDescription = True  # Whether or not we are currently expecting a line starting with >.
    firstLine = True        # Whether or not we are currently examining the first line of the file.
    proteinsInFile = {}     # A dictionary indexed by the protein description line of the FASTA file.
                            # The value of each entry is the correctly formatted protein sequence corresponding to the index.

    # Strip off all excess whitespace, and split the string into the individual lines of the file.
    checking = fileToCheck.rstrip()
    checking = checking.lstrip()
    checking = checking.split('\n')

    for line in checking:
        line = line.rstrip()
        if firstLine:
            # True if we have just started parsing the file string, and haven't yet examined any lines.
            if line[0] == '>':
                currentProt = line       # Record the description line of the protein which is about to have its sequence inspected.
                currentSeq = ''          # Initialise the sequence of the protein.
                protDescription = False  # We are now expecting a protein sequence, not a protein description.
                firstLine = False
            else:
                # The first line of the file MUST be a protein description line (i.e. start with '>'). If the line was not
                # the beginning of a protein record, terminate the program.
                errorMessage = "Expected line " + str(lineCount) + " to start with a >, but instead got: " + line
                return 1, errorMessage
        elif protDescription:
            # This is true only if a line beginning with a '>' is expected.
            if line[0] == '>':
                # Expected a protein description line, and found a protein description line. This means that the entire sequence
                # of the currentProt protein has been found (i.e. we have finished inspecting the sequence of a protein, and
                # have found the protein to be valid). Now determine if the length of the sequence is within the user
                # specified bounds.
                if minLength == -1:
                    if maxLength == -1:
                        # If there are no restrictions on the protein sequence length, then record the protein and its sequence.
                        proteinsInFile[currentProt] = currentSeq
                    elif len(currentSeq) <= maxLength:
                        # If there is no minimum length restriction, and the protein sequence is not longer than the maximum
                        # sequence length permitted, then record the protein and its sequence.
                        proteinsInFile[currentProt] = currentSeq
                elif len(currentSeq) >= minLength:
                    if maxLength == -1:
                        # If there is no maximum length restriction, and the protein sequence is not shorter than the minimum
                        # sequence length permitted, then record the protein and its sequence.
                        proteinsInFile[currentProt] = currentSeq
                    elif len(currentSeq) <= maxLength:
                        # If the protein sequence is not shorter than the minimum sequence length permitted and not longer
                        # than the maximum length permitted, then record the protein and its sequence.
                        proteinsInFile[currentProt] = currentSeq
                currentProt = line       # Record the description line of the protein which is about to have its sequence inspected.
                currentSeq = ''          # Initialise the sequence of the protein.
                protDescription = False  # We are now expecting a protein sequence, not a protein description.
            else:
                # If the line does not begin with a '>', and it is expected to, it is possible that the amino acid sequence
                # is split over multiple lines.
                if line.isalpha():
                    # If every character on the line is a letter, then the line contains a valid portion of the sequence.
                    # Add the uppercase version of the sequence portion to the sequence currently being recorded.
                    currentSeq += line.upper()
                else:
                    # If the line did not contain only letters, terminate the program.
                    errorMessage = "Expected line " + str(lineCount) + " to start with a >, but instead got: " + line
                    return 1, errorMessage
        else:
            # If an amino acid sequence is expected.
            if line.isalpha():
                # If the line is all alphabetic characters, write the line out and indicate that we are expecting a
                # protein description line next (i.e. one beginning with a '>').
                currentSeq += line.upper()
                protDescription = True
            else:
                # If the line did not contain only letters, terminate the program.
                errorMessage = "Expected line " + str(lineCount) + " to contain only letters, but instead got: " + line
                return 2, errorMessage
        lineCount += 1

    # Catch the final protein from the file, and determine whether it should be recorded.
    if minLength == -1:
        if maxLength == -1:
            proteinsInFile[currentProt] = currentSeq
        elif len(currentSeq) <= maxLength:
            proteinsInFile[currentProt] = currentSeq
    elif len(currentSeq) >= minLength:
        if maxLength == -1:
            proteinsInFile[currentProt] = currentSeq
        elif len(currentSeq) <= maxLength:
            proteinsInFile[currentProt] = currentSeq

    if len(proteinsInFile.keys()) < 2:
        # There are too few protein sequences entered.
        errorMessage = ("Not enough unique protein sequences have been entered." +
                        " This is possibly caused by not enough sequences of the required minimum and maximum length being provided.")
        return 3, errorMessage
    elif protDescription:
        # Return an indication that the FASTA file is correctly formatted.
        outputString = ''
        for i in proteinsInFile.keys():
            outputString += i + '\n' + proteinsInFile[i] + '\n'
        return 0, outputString[:-1]
    else:
        # The file did not end with a protein sequence.
        errorMessage = "Reached the end of the file, but no protein sequence found for the final protein."
        return 3, errorMessage
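A minimal usage sketch of the checker above (an editorial illustration, not part of the retrieved document): the FASTA text, variable names, and length bounds below are assumptions chosen only to show the (status, payload) return convention, where status 0 carries the reformatted FASTA text and non-zero codes carry an error message.

# Illustrative call to main() as reconstructed above; the example FASTA string is an assumption.
example_fasta = ">protA first test protein\nMKTAYIAK\n>protB second test protein\nGATTACA"
status, payload = main(example_fasta, minLength=5, maxLength=100)
if status == 0:
    # payload holds the cleaned-up FASTA text (description line, then upper-case sequence, per protein).
    print(payload)
else:
    # Status codes 1, 2 and 3 indicate different formatting problems; payload is the error message.
    print("FASTA check failed with code %d: %s" % (status, payload))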
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_style(file_path, style_config):\n\n with open(file_path, 'r') as f:\n content = f.read()\n is_valid_header = (len(content) == 0 or content.startswith(\n PythonFormatter.standard_header))\n\n _, _, changed = yapf.yapflib.yapf_api.FormatFile(\n file_path, style_config=style_config, in_place=False)\n return (not changed, is_valid_header)", "def _check_style(file_path, clang_format_bin):\n with open(file_path, 'r') as f:\n is_valid_header = f.read().startswith(CppFormatter.standard_header)\n\n cmd = [\n clang_format_bin,\n \"-style=file\",\n \"-output-replacements-xml\",\n file_path,\n ]\n result = subprocess.check_output(cmd).decode(\"utf-8\")\n if \"<replacement \" in result:\n is_valid_style = False\n else:\n is_valid_style = True\n return (is_valid_style, is_valid_header)", "def check_diff(filepath, original, formatted):\n diff = list(unified_diff(original, formatted))\n if diff:\n print('{} diff:'.format(filepath))\n print((\"\".join(diff)).replace('\\r', ''))\n print()\n\n return bool(diff)", "def is_file_parsable(cls, filepath, beginning=None):\n\n # TODO can we redirect the loc calls from here so they aren't shown\n # since we're just testing if parsable and an error message \n # would give the wrong impression\n # or restructure the way our logger works, maybe accept a logger in\n # the subsequent calls\n\n super().is_file_parsable(filepath)\n\n # MAGIC USAA doesn't use a header and the first line will do\n lines = [l for l in cls.yield_header(filepath, rows=1)]\n try:\n first_line = lines[0]\n except IndexError:\n logging.error(\"file line count is 0: %s\" % filepath)\n return False\n is_parsable = cls._run_parse_checks(first_line, filepath)\n\n # NOTE b/c USAA does not use a header, check a few properties of the data\n return is_parsable", "def check_fasta(filename):\n fa_exts = [\".fa\", \".fasta\"]\n p, ext = os.path.splitext(filename)\n if not ext.lower() in fa_exts: # TODO: check if bowtie2, STAR handle gzipped fa files\n raise ValueError(\"Error: \\\"\" + filename + \"\\\" does not match expected extensions: \" + str(fa_exts))", "def check_if_fasta(file):\n if os.path.splitext(file)[1] != \".zip\":\n with open(file, \"r\") as handle:\n fasta = SeqIO.parse(handle, \"fasta\")\n return any(fasta)\n else:\n return False", "def detect_format(file):\n logging.debug('Detecting the log format')\n\n format = False\n\n # check the format using the file (for formats like the W3cExtendedFormat one)\n format = Parser.check_format(file)\n\n # check the format using the first N lines (to avoid irregular ones)\n lineno = 0\n limit = 100000\n while not format and lineno < limit:\n line = file.readline()\n if not line: # if at eof, don't keep looping\n break\n\n lineno = lineno + 1\n\n logging.debug(\"Detecting format against line %i\" % lineno)\n format = Parser.check_format(line)\n\n try:\n file.seek(0)\n except IOError:\n pass\n\n if not format:\n fatal_error(\"cannot automatically determine the log format using the first %d lines of the log file. 
\" % limit +\n \"\\nMaybe try specifying the format with the --log-format-name command line argument.\" )\n return\n\n logging.debug('Format %s is the best match', format.name)\n return format", "def clang_check(file_path, hunks=None):\n \n assert not file_path is None and not file_path == \"\"\n \n rel_path_from_peloton_dir = os.path.relpath(file_path, PELOTON_DIR)\n\n if rel_path_from_peloton_dir in FORMATTING_FILE_WHITELIST:\n return True\n\n file_status = True\n\n # Run clang-format on the file and get output (not inline!)\n formatted_src = clang_format(file_path, None, inline=False)\n\n # For Python 3, the above command gives a list of binary sequences, each\n # of which has to be converted to string for diff to operate correctly.\n # Otherwise, strings would be compared with binary sequences and there\n # will always be a big difference.\n formatted_src = [line.decode('utf-8') for line in formatted_src]\n # Load source file\n with open(file_path, \"r\") as file:\n src = file.readlines()\n\n # Do the diff\n difference = difflib.Differ()\n diff = difference.compare(src, formatted_src)\n line_num = 0\n for line in diff:\n code = line[:2]\n if code in (\" \", \"- \"):\n line_num += 1\n if code == '- ':\n if file_status:\n LOG.info(\"Invalid formatting in file : \" + file_path)\n LOG.info(\"Line %d: %s\", line_num, line[2:].strip())\n file_status = False\n\n return file_status", "def should_format(\n filename: Path, include_patterns: Iterable[str], exclude_patterns: Iterable[str]\n) -> Tuple[bool, str]:\n from fnmatch import fnmatch\n\n if any(fnmatch(os.path.abspath(filename), pattern) for pattern in exclude_patterns):\n return False, \"Excluded file\"\n\n filename_no_ext, ext = os.path.splitext(filename)\n # ignore .py file that has a jupytext configured notebook with the same base name\n ipynb_filename = filename_no_ext + \".ipynb\"\n if ext == \".py\" and os.path.isfile(ipynb_filename):\n with open(ipynb_filename, \"rb\") as f:\n if b\"jupytext\" not in f.read():\n return True, \"\"\n with open(filename, \"rb\") as f:\n if b\"jupytext:\" not in f.read():\n return True, \"\"\n return False, \"Jupytext generated file\"\n\n if any(fnmatch(os.path.basename(filename), pattern) for pattern in include_patterns):\n return True, \"\"\n\n return False, \"Unknown file type\"", "def is_fasta(infile):\n\top_infile = open(infile, \"r\")\n\tfor line in op_infile:\n\t\tif line.startswith(\">\"):\n\t\t\tfasta = True\n\t\telse:\n\t\t\tfasta = False\n\t\tbreak\n\treturn fasta\n\top_infile.close()", "def verify_valid_fasta_format(input_fasta_fp):\r\n\r\n fasta_f = open(input_fasta_fp, \"U\")\r\n\r\n try:\r\n for label, seq in parse_fasta(fasta_f):\r\n continue\r\n except RecordError:\r\n raise RecordError(\"Input fasta file not valid fasta format. Error \" +\r\n \"found at %s label and %s sequence \" % (label, seq))\r\n\r\n fasta_f.close()", "def __check_format(node, lint_ctx, profile: str, allow_ext=False):\n if \"format_source\" in node.attrib and (\"ext\" in node.attrib or \"format\" in node.attrib):\n lint_ctx.warn(\n f\"Tool {node.tag} output '{node.attrib.get('name', 'with missing name')}' should use either format_source or format/ext\",\n node=node,\n )\n if \"format_source\" in node.attrib:\n return True\n if node.find(\".//action[@type='format']\") is not None:\n return True\n # if allowed (e.g. 
for discover_datasets), ext takes precedence over format\n fmt = None\n if allow_ext:\n fmt = node.attrib.get(\"ext\")\n if fmt is None:\n fmt = node.attrib.get(\"format\")\n if fmt == \"input\":\n message = f\"Using format='input' on {node.tag} is deprecated. Use the format_source attribute.\"\n if Version(str(profile)) <= Version(\"16.01\"):\n lint_ctx.warn(message, node=node)\n else:\n lint_ctx.error(message, node=node)\n\n return fmt is not None", "def preliminary_file_check(self):\n\n if self.has_error():\n return False\n\n if not self.filepath:\n self.add_error(\"A file was specified!\")\n return False\n\n if not isfile(self.filepath):\n self.add_error(\"The file was not found: %s\" % basename(self.filepath))\n return False\n\n if getsize(self.filepath) < 1:\n self.add_error(\"The file is empty (no bytes): %s\" % basename(self.filepath))\n return False\n\n if self.file_ext in ['xls', 'xlsx']:\n self.is_excel = True\n\n return True", "def check_file(a_file):\n a_file = os.path.abspath(a_file) # for better error messages\n if not os.path.isfile(a_file):\n raise argparse.ArgumentTypeError('No such file: {}'.format(a_file))\n return a_file", "def isMayaFile(potentialMayaFile):\n\n pass", "def _IsWellFormattedFilePath(path):\n return path.startswith(SRC) and path.endswith(_OWNERS)", "def _verify_original_file_type(self, file_name):\n self._original_file_path = file_name\n\n available_instrument_types = INSTRUMENT_TO_TYPE_MAP[self._instrument]\n\n instrument_file_type = None\n\n # Check if file contains any of the necessary identifiers\n for available_type in available_instrument_types:\n for identifier in available_instrument_types[available_type]:\n if (search(identifier, self._original_file_path)) or (search(identifier, self._original_file_path)):\n instrument_file_type = available_type\n\n if not instrument_file_type:\n raise ValueError(INVALID_FILE_TYPE_ERROR.format(self._original_file_path, self._instrument))\n else:\n self._original_file_type = instrument_file_type", "def check_file_flag(file):\n return process_file_flag(file, None)", "def check_filename_convention(filename):\n\n if \"eduhub\" in filename.lower():\n filename = filename.lower().split(\"eduhub_\")[1]\n\n filename = filename.replace(\" \", \"\")\n\n end_date_str = filename[:10]\n start_date_str = filename[11:11 + 10]\n\n try:\n end_date = datetime.datetime.strptime(end_date_str, \"%Y-%m-%d\")\n start_date = datetime.datetime.strptime(start_date_str, \"%Y-%m-%d\")\n\n except ValueError:\n start_date = None\n end_date = None\n\n print(\"File name {} has incorrect format\".format(filename))\n return False, start_date, end_date\n\n if start_date >= end_date:\n print(\"File name {} has incorrect format\".format(filename))\n return False, start_date, end_date\n\n return True, start_date, end_date", "def is_fasta(filename: Path) -> bool:\n filename = Path(filename)\n if filename.exists():\n fasta = list(SeqIO.parse(str(filename), \"fasta\"))\n return any(fasta)\n else:\n return False", "def _is_probably_new_datfile_format(raw_data):\n return \"<OOI-ts:\" in raw_data", "def is_manga(file):\n return file.lower().endswith(MANGA_EXTENSIONS)", "def read_file(self):\n try:\n self.json_parsed_file = parse_progress_report(self.in_file)\n self.output_message += 'Student: {}, {}\\n'.format(self.json_parsed_file['id'],\n self.json_parsed_file['name'])\n return True\n\n except IOError:\n self.output_message += \"File does not exist\\n\"\n self.is_parsed_pdf_valid = False\n return False\n\n except TypeError:\n self.output_message += 
\"There is an issue with the file\\n\"\n self.is_parsed_pdf_valid = False\n return False", "def is_valid_file(input_file):\n if not os.path.isfile(input_file):\n print('File \\'{}\\' not found.'.format(input_file))\n exit(1)\n return input_file", "def test_is_fasta_header(self):\r\n\r\n is_fasta_header = False\r\n\r\n with open(full_file_name, \"r\") as in_file:\r\n for line in in_file:\r\n is_fasta_header = mfau.is_header_line(line)\r\n\r\n # only testing the first line\r\n break\r\n\r\n self.assertEqual(is_fasta_header, True)", "def __checkFile(self, filename):\n \n try:\n with open(filename, 'r') as f:\n first_line = f.readline()\n \n if not len(first_line.split(\"\\t\")) == 19:\n raise BadProteomeScoutFile(\"N/A\")\n \n \n except:\n BadProteomeScoutFile(\"Invalid ProteomeScout flat file %s.\\nFile is invalid or corrupted\" % str(filename))", "def test_fasta_file(self):\r\n self.assertRaises(IOError, convert_fastaqual_fastq,\r\n self.read_only_output_dir, self.qual_file_path)", "def CheckRequiredFormat(self, parser_mediator, text_reader):\n # Format verification will be faster on average by checking the presence of\n # fixed-text fragments first.\n if 'Start-Date: ' not in text_reader.lines:\n return False\n\n try:\n structure = self._VerifyString(text_reader.lines)\n except errors.ParseError:\n return False\n\n time_elements_structure = self._GetValueFromStructure(\n structure, 'date_time')\n\n try:\n self._ParseTimeElements(time_elements_structure)\n except errors.ParseError:\n return False\n\n self._ResetState()\n\n return True", "def checkFileFormat(self, cellPos):\n try:\n cellPosList = cellPos.split()\n cellXPos = int(cellPosList[0])\n cellYPos = int(cellPosList[1])\n except ValueError:\n messagebox.showerror(\"Error: Wrong format\", \"The choosen file do not have the correct format. 
Be so kind to choose an other file.\")\n return False\n pass\n\n return (cellXPos, cellYPos)", "def test_fastaqual_output(self):\r\n self.assertRaises(IOError, convert_fastaqual, self.fasta_file_path,\r\n output_directory=self.read_only_output_dir)", "def testCheckRequiredFormat(self):\n plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()\n\n file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()\n file_system_builder.AddFile('/file.txt', (\n b'2018-01-24 18:25:08,454 -0800 INFO pid=2376 7780:MainThread '\n b'logging_config.py:295 OS: Windows/6.1-SP1\\n'))\n\n file_entry = file_system_builder.file_system.GetFileEntryByPath('/file.txt')\n\n parser_mediator = self._CreateParserMediator(None, file_entry=file_entry)\n\n file_object = file_entry.GetFileObject()\n text_reader = text_parser.EncodedTextReader(file_object)\n text_reader.ReadLines()\n\n result = plugin.CheckRequiredFormat(parser_mediator, text_reader)\n self.assertTrue(result)", "def test_file_valid(self) -> None:\n test_file = \"empty.css\"\n initial_content = self._file_read(test_file)\n CSSFormatter.format(self._file_path(test_file))\n assert initial_content == self._file_read(test_file)", "def isfile(line):\n return line and (line.strip()[:3] == \"FHS\" or isbatch(line))", "def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success", "def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success", "def is_valid_file(self, file_path):\n return True", "def test_file(self, file: CollectedFile):\n\n return file.filename[-3:].upper() == 'TXT'", "def _check_valid_file(self, file):\n\n try:\n _ = open(f\"{file}\")\n except FileNotFoundError:\n raise ValueError", "def valid_file(file):\n filename = file\n #print(filename)\n with open(filename) as fileIn:\n line = fileIn.readline()\n tag = False\n lineCount= 0\n while line:\n lineCount = linecount(line)\n letter_list = line.split()\n first_letter = letter_list[0]\n if lineCount == 81 and line.endswith('\\n') and first_letter in record_type.values():\n tag = True\n \n \n else:\n tag = False\n \n line = fileIn.readline() \n \n if tag == True:\n 
#menu_display1(filename)\n return filename\n if tag == False:\n print('File \\033[1;31merror\\033[1;m not a PDB file')\n return None", "def checkFormat(vcfname):\n if vcfname == '-':\n return 'vcf' #may want to note its a stream\n typ = checkIfGzip(vcfname)\n if typ != 'nozip':\n return typ\n f = open(vcfname)\n l = f.readline()\n f.close()\n VCF_TAG='##fileformat=VCF'\n if l[:len(VCF_TAG)] == VCF_TAG:\n return 'vcf'\n return 'other'", "def check_fasta_gff(fasta,\n gff,\n issues_file,\n feature_format=CDS_FEATURE_FORMAT,\n use_feature_name=False,\n start_codons=[START_CODON],\n is_verbose=False,\n delimiter=\"\\t\"):\n config, metadata, issues = run_fasta_gff_check(\n fasta, gff, feature_format, use_feature_name, start_codons)\n issue_counts = count_issues(issues)\n header = dict(config)\n header.update(metadata)\n header.update(issue_counts)\n write_issues_to_csv(issues, issues_file, header, delimiter)\n print(\"Configuration:\")\n for (tag, value) in config.items():\n print(\"{}\\t{}\".format(tag, value))\n print(\"\\nMetadata:\")\n for (tag, value) in metadata.items():\n print(\"{}\\t{}\".format(tag, value))\n print(\"\\nIssue summary:\")\n print(\"{}\\t{}\".format(\"Issue\", \"Count\"))\n for (tag, value) in issue_counts:\n print(\"{}\\t{}\".format(tag, value))\n if is_verbose:\n print(\"\\nIssue details:\")\n for (sequence_id, feature_id, issue_type, issue_data) in issues:\n if issue_type in ISSUE_FORMATS:\n print(ISSUE_FORMATS[issue_type].format(sequence=sequence_id,\n feature=feature_id,\n data=issue_data))", "def is_file_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_exists(args, skip=True)\n or is_valid_comments(args, skip=True)\n or is_valid_paragraphs(args, skip=True)\n or is_valid_words(args, skip=True)\n ):\n return True\n return False", "def checkFile(grammar, file):\n correct = 0.0\n text = open(file).read()\n for str in text.split():\n str = \" \".join(str)\n parser = Parser(grammar, str)\n\n if parser.print_tree(False):\n # successfully parsed\n correct += 1\n\n return correct / len(text.split())", "def is_matched(afile, ffilter=[]):\n if ffilter:\n # we check only full name and extension of file\n if (os.path.basename(afile) in ffilter) or (file_ext(afile) in ffilter):\n return True\n else:\n return False\n return True", "def is_valid_file(args):\n if args.file is not None:\n return True\n return False", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def test_validate_fasta(self):\r\n\r\n validate_fasta(self.sample_fasta_fp, self.sample_mapping_fp,\r\n self.output_dir)\r\n\r\n expected_log_fp = join(self.output_dir,\r\n split(self.sample_fasta_fp)[1] + \"_report.log\")\r\n\r\n log_f = open(expected_log_fp, \"U\")\r\n actual_log_lines = [line.strip() for line in 
log_f][1:]\r\n\r\n expected_log_lines = \"\"\"Percent duplicate labels: 0.000\r\nPercent QIIME-incompatible fasta labels: 0.000\r\nPercent of labels that fail to map to SampleIDs: 0.000\r\nPercent of sequences with invalid characters: 0.000\r\nPercent of sequences with barcodes detected: 0.000\r\nPercent of sequences with barcodes detected at the beginning of the sequence: 0.000\r\nPercent of sequences with primers detected: 0.000\"\"\".split('\\n')\r\n\r\n self.assertEqual(actual_log_lines, expected_log_lines)\r\n\r\n # Check with all optional values included\r\n\r\n validate_fasta(self.sample_fasta_fp, self.sample_mapping_fp,\r\n self.output_dir, tree_fp=self.sample_tree_5tips_fp, tree_subset=True,\r\n tree_exact_match=True, same_seq_lens=True, all_ids_found=True)\r\n\r\n expected_log_fp = join(self.output_dir,\r\n split(self.sample_fasta_fp)[1] + \"_report.log\")\r\n\r\n log_f = open(expected_log_fp, \"U\")\r\n actual_log_lines = [line.strip() for line in log_f][1:]\r\n\r\n expected_log_lines = \"\"\"Percent duplicate labels: 0.000\r\nPercent QIIME-incompatible fasta labels: 0.000\r\nPercent of labels that fail to map to SampleIDs: 0.000\r\nPercent of sequences with invalid characters: 0.000\r\nPercent of sequences with barcodes detected: 0.000\r\nPercent of sequences with barcodes detected at the beginning of the sequence: 0.000\r\nPercent of sequences with primers detected: 0.000\r\nSequence lengths report\r\nCounts of sequences, followed by their sequence lengths:\r\n1\\t35\r\n1\\t32\r\n1\\t27\r\nSample ID in fasta sequences report\r\nThe following SampleIDs were not found:\r\nseq2\r\nFasta label subset in tree tips report\r\nAll fasta labels were a subset of tree tips.\r\nFasta label/tree tip exact match report\r\nAll fasta labels found in tree tips.\r\nThe following tips were not in fasta labels:\r\nseq2\r\nseq5\r\nseq4\"\"\".split('\\n')\r\n\r\n self.assertEqual(actual_log_lines, expected_log_lines)", "def check_format_of_annotation_in_file(self):\n if not self.is_span_valid():\n sys.exit()", "def autodetect_format(file_data):\n\n # The first header line.\n for line in file_data:\n if line != []:\n break\n\n # Sparky format.\n if line[0] == 'Assignment':\n return 'sparky'\n\n # NMRView format.\n if line == ['label', 'dataset', 'sw', 'sf']:\n return 'nmrview'\n\n # NMRPipe SeriesTab.\n if line[0] == 'REMARK' and line[1] == 'SeriesTab':\n return 'seriestab'\n\n # XEasy format.\n if line == ['No.', 'Color', 'w1', 'w2', 'ass.', 'in', 'w1', 'ass.', 'in', 'w2', 'Volume', 'Vol.', 'Err.', 'Method', 'Comment']:\n return 'xeasy'\n\n # Assume a generic format.\n return 'generic'", "def check_file(file_name):\n\n if isinstance(file_name, str):\n if not os.path.isfile(file_name):\n raise ValueError('Got string input, but it is not a valid path')\n\n # check if this is just an xml file\n with open(file_name, 'rb') as fi:\n initial_bits = fi.read(30)\n if initial_bits.startswith(b'<?xml') or initial_bits.startswith(b'<SICD'):\n sicd_xml = fi.read()\n return _evaluate_xml_string_validity(sicd_xml)[0]\n\n return check_sicd_file(file_name)", "def is_declaring_file(self, address, file_path):", "def validation(nameFile, fileContent):\n\n\n dayNameFile = nameFile[-5:-4]\n monthNameFile = nameFile[-8:-6]\n yearNameFile = nameFile[-13:-9]\n hourNameFile = nameFile[-19:-14]\n hourNameFile = hourNameFile.replace(\"h\", \"\")\n \n\n if nameFile[0:6] == \"drones\":\n scopeNameFile = nameFile[0:6]\n elif nameFile[0:7] == \"parcels\":\n scopeNameFile = nameFile[0:7]\n\n headerFileContent = 
fileContent[constants.header]\n dateFile = headerFileContent[constants.headerTime]\n dayFile = dateFile[0:1]\n monthFile = dateFile[2:4]\n yearFile = dateFile[5:9]\n hourFile = headerFileContent[1]\n hourFile = hourFile.replace(\"h\", \"\")\n scopeFile = headerFileContent[constants.scope]\n\n\n return hourNameFile == hourFile and dayNameFile == dayFile and monthNameFile == monthFile and yearNameFile == yearFile and scopeNameFile == scopeFile", "def check_file_validity(self):\n # Initialize key variables\n file_ = self.tailed_file\n\n # Check if exists\n if os.path.exists(file_) is False:\n log_message = 'File {} does not exist.'.format(file_)\n log.log2die(1018, log_message)\n\n # Check if file\n if os.path.isfile(file_) is False:\n log_message = '{} is not a file.'.format(file_)\n log.log2die(1035, log_message)\n\n # Check if readable\n if not os.access(file_, os.R_OK):\n log_message = 'File {} is not readable.'.format(file_)\n log.log2die(1036, log_message)", "def testIsFile(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingFilePath=P(self.existingFilePathStr)\r\n nonExistingFilePath=P(self.nonExistingFilePathStr)\r\n existingValidFileSymlinkPath=P(self.existingValidSymlinkFilePathStr)\r\n existingInvalidFileSymlinkPath=\\\r\n P(self.existingInvalidSymlinkFilePathStr)\r\n existingDirPath=P(self.existingDirPathStr)\r\n\r\n\r\n # 1\r\n self.assertEquals(existingFilePath.isFile(),True,\r\n '%r is a file'%str(existingFilePath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingFilePath.isFile(),False,\r\n 'File %r does not exist'%str(nonExistingFilePath))\r\n\r\n # 3\r\n self.assertEquals(existingValidFileSymlinkPath.isFile(),True,\r\n '%r is a file'%str(existingValidFileSymlinkPath))\r\n\r\n # 4\r\n self.assertEquals(existingInvalidFileSymlinkPath.isFile(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidFileSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingDirPath.isFile(),False,\r\n '%r is a dir'%str(existingDirPath))", "def _is_valid_fmt(self, fmt):\n # make sure there is no leading or trailing whitespace\n fmt = fmt.strip()\n \n if fmt[0] != '%':\n return False\n \n # Handle business calendars first.\n # This does not check the calendar name.\n if fmt[1:3] == \"tb\" or fmt[1:4] == \"-tb\":\n return True if TB_FMT_RE.match(fmt) else False\n \n # date formats\n if fmt[1] == 't' or fmt[1:3] == '-t':\n return True if TIME_FMT_RE.match(fmt) else False\n \n # categorize using last character\n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n if not m: return False\n width = int(m.group(3))\n if width == 0 or width > 2045: return False\n return True\n elif last_char == 'H' or last_char == 'L': # binary\n # Valid binary formats are ^%(8|16)(H|L)$. 
Stata doesn't raise \n # error with -8 or -16, but the results are perhaps unexpected.\n return True if fmt[1:-1] in ('8', '16', '-8', '-16') else False\n elif last_char == 'x': # hexadecimal\n return True if fmt == '%21x' or fmt == '%-12x' else False\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n if not m: return False\n width = int(m.group(3))\n if width == 0 or width <= int(m.group(5)) or width > 2045: \n return False\n return True\n \n return False", "def pdb_file_valid(pdb_file_name, user_rand):\n dcd_file = \"media/files/\" + user_rand + '/' + \"scr_for_checks.dcd\"\n\n fix_not_needed = True\n try:\n scr_for_checks(pdb_file_name, user_rand)\n except Exception as e:\n # print(str(e))\n fix_not_needed = False\n finally:\n if os.path.exists(dcd_file):\n os.remove(dcd_file)\n\n if fix_not_needed:\n return True\n\n try:\n fix_pdb(pdb_file_name)\n scr_for_checks(pdb_file_name, user_rand)\n except Exception as e:\n print(str(e))\n return False\n\n return True", "def check_valid_file_name(self, file_name, input_output):\n if self.check_file_exists (file_name):\n if self.check_file_name_extensions (file_name, input_output):\n return True\n else:\n return False\n else:\n print (\"File does not exist\")\n return False", "def isValid(self):\n return self.file_name != \"\" and self.line_number != 0", "def test_qual_file(self):\r\n self.assertRaises(IOError, convert_fastaqual_fastq,\r\n self.fasta_file_path, self.read_only_output_dir)", "def check_file_extensions(reference_sequences_path, input_fasta_path, input_cluster_path):\n\n\tsequence_file_extension = path.splitext(reference_sequences_path)[-1]\n\tfasta_file_extension = path.splitext(input_fasta_path)[-1]\n\tcluster_file_extension = path.splitext(input_cluster_path)[-1]\n\n\tif not sequence_file_extension == \".txt\":\n\t\tprint(\"[Warning] \" + sequence_file_extension + \" may not be a txt file!\")\n\tif not fasta_file_extension == \".faa\":\n\t\tprint(\"[Warning] \" + fasta_file_extension + \" may not be a FASTA file!\")\n\tif not cluster_file_extension == \".clstr\":\n\t\tprint(\"[Warning] \" + cluster_file_extension + \" may not be a CD-Hit cluster file!\")", "def _is_valid_fmt(self, fmt):\n # make sure there is no leading or trailing whitespace\n fmt = fmt.strip()\n \n if fmt[0] != '%':\n return False\n \n # Handle business calendars first.\n # This does not check the calendar name.\n if fmt[1:3] == \"tb\" or fmt[1:4] == \"-tb\":\n return True if TB_FMT_RE.match(fmt) else False\n \n # date formats\n if fmt[1] == 't' or fmt[1:3] == '-t':\n return True if TIME_FMT_RE.match(fmt) else False\n \n # categorize using last character\n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n if not m: return False\n width = int(m.group(3))\n if width == 0 or width > 244: return False\n return True\n elif last_char == 'H' or last_char == 'L': # binary\n # Valid binary formats are ^%(8|16)(H|L)$. 
Stata doesn't raise \n # error with -8 or -16, but the results are perhaps unexpected.\n return True if fmt[1:-1] in ('8', '16', '-8', '-16') else False\n elif last_char == 'x': # hexadecimal\n return True if fmt == '%21x' or fmt == '%-12x' else False\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n if not m: return False\n width = int(m.group(3))\n if width == 0 or width <= int(m.group(5)) or width > 244: \n return False\n return True\n \n return False", "def get_file_format(file):\n flag = None\n with open(file) as f:\n for line in f.readlines():\n MAT, MF, MT = read_control(line)[:3]\n if MF == 1 and MT == 451:\n i = 0\n C, i = read_cont([line], i)\n flag = C.N1\n break\n if flag is None:\n ftype = None\n elif flag == -11 or flag == -12:\n ftype = \"errorr\"\n elif flag == -1:\n ftype = \"gendf\"\n else:\n if C.L1 == 2:\n ftype = \"pendf\"\n else:\n ftype = \"endf6\"\n return ftype", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)", "def _is_valid_file(arg: str) -> str:\n if not os.path.exists(arg):\n raise FileNotFoundError(\"%s does not exist\" % arg)\n return arg", "def is_filename_safe(file: Union[str, FileStorage]) -> bool:\n filename = _retrieve_filename(file)\n accepted_formats = '|'.join(IMAGES) #png|jpg|svg|jpeg\n # start with any of alphanumeric, follwed by alhpanumeric and special characters(any amount), \".\", then one of the accepted formats\n regex = f'[a-zA-Z0-9][a-zA-Z0-9_()-\\.]*\\.({accepted_formats})$'\n # if no match is found match() returns None\n return re.match(filename, regex) is not None", "def test_full_fasta_headers(self):\r\n convert_fastaqual(self.fasta_file_path, full_fasta_headers=True,\r\n output_directory=self.output_dir)\r\n\r\n actual_output_fasta_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '.fna',\r\n self.output_dir)\r\n\r\n actual_output_qual_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '.qual',\r\n self.output_dir)\r\n\r\n actual_output_fasta = open(actual_output_fasta_path)\r\n actual_output_qual = open(actual_output_qual_path)\r\n actual_fasta = actual_output_fasta.read()\r\n actual_output_fasta.close()\r\n actual_qual = actual_output_qual.read()\r\n actual_output_qual.close()\r\n self._files_to_remove.append(actual_output_fasta_path)\r\n self._files_to_remove.append(actual_output_qual_path)\r\n\r\n self.assertEquals(actual_fasta, expected_fasta_full_fasta_headers)\r\n self.assertEquals(actual_qual, expected_qual_full_fasta_headers)", "def check_base_filename(self, record):\n time_tuple = time.localtime()\n\n if self.file_name_format:\n pass\n\n if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(\n self._get_format_filename()):\n return 1\n else:\n return 0", "def assert_style_data_correct(self) -> bool:\n style_chars = Path(os.environ[\"DATA_PATH\"]) / \"character_styles\"\n style_frags = Path(os.environ[\"DATA_PATH\"]) / \"fragment_styles\"\n if style_chars.exists() and style_frags.exists():\n return True\n return False", "def check_file_encoding(self, input_file_path):\n self.log([u\"Checking encoding of file '%s'\", input_file_path])\n self.result = ValidatorResult()\n if 
self._are_safety_checks_disabled(u\"check_file_encoding\"):\n return self.result\n if not gf.file_can_be_read(input_file_path):\n self._failed(u\"File '%s' cannot be read.\" % (input_file_path))\n return self.result\n with io.open(input_file_path, \"rb\") as file_object:\n bstring = file_object.read()\n self._check_utf8_encoding(bstring)\n return self.result", "def ShouldCheckFile(file_name):\n checked_extensions = [\n '.c',\n '.cc',\n '.h',\n '.m',\n '.mm',\n # These are not the preferred extension in our codebase,\n # but including them for good measure.\n # (They do appear in the newlib toolchain + third_party libraries).\n '.cpp',\n '.hpp',\n ]\n basename, extension = os.path.splitext(file_name)\n return extension in checked_extensions", "def supportsFile(self, filename):\n extension = filename.rsplit(\".\", 1)[1]\n return extension in AcronymDisambiguator.supported_extensions", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def test_run_fasta_checks(self):\r\n\r\n actual_fasta_report = run_fasta_checks(self.sample_fasta_fp,\r\n self.sample_mapping_fp)\r\n\r\n # All values should be zero\r\n expected_fasta_report = {'invalid_labels': '0.000',\r\n 'tree_subset': False,\r\n 'all_ids_found': False,\r\n 'same_seq_lens': False,\r\n 'barcodes_detected': '0.000',\r\n 'duplicate_labels': '0.000',\r\n 'duplicate_ids': [],\r\n 'invalid_seq_chars': '0.000',\r\n 'nosample_ids_map': '0.000',\r\n 'linkerprimers_detected': '0.000',\r\n 'tree_exact_match': False,\r\n 'barcodes_at_start': '0.000'}\r\n\r\n self.assertEqual(actual_fasta_report, expected_fasta_report)", "def is_valid_file(ext, argument):\n formats = {\n 'input_dataset_path': ['csv', 'txt'],\n 'output_dataset_path': ['csv'],\n 'output_plot_path': ['png'],\n 'input_model_path': ['pkl']\n }\n return ext in formats[argument]", "def validate(self):\n\n audio_filename = basename(strip_extension(self.audio_file.location))\n transcript_filename = basename(strip_extension(self.transcript_file.location))\n\n # Audio and transcript filename must match\n # Audio file must not be empty\n # Transcript file must not be empty\n valid = (\n audio_filename == transcript_filename\n and os.path.getsize(self.audio_file.location)\n and os.path.getsize(self.transcript_file.location)\n )\n # This returns an integer corresponding to the output of the last condition, not a boolean.\n # Thats just how `and` works in python\n\n return bool(valid)", "def test_default_format_file_name():\n\toutput_name = format_file_name(test_file)\n\tassert (output_name == test_file[:-4] + \"_no_grave.h5m\") == True", "def does_file_have_416_issue(file_path: str) -> bool:\n with open(file_path, \"rb\") as file_handler:\n file_handler.seek(-1024, os.SEEK_END)\n if b\"416 Requested Range Not Satisfiable\" in file_handler.read():\n return True\n return False", "def test_validate_fasta_with_invalid(self):\r\n\r\n validate_fasta(self.sample_fasta_invalid_fp, self.sample_mapping_fp,\r\n self.output_dir)\r\n\r\n expected_log_fp = 
join(self.output_dir,\r\n split(self.sample_fasta_invalid_fp)[1] + \"_report.log\")\r\n\r\n log_f = open(expected_log_fp, \"U\")\r\n actual_log_lines = [line.strip() for line in log_f][1:]\r\n\r\n expected_log_lines = \"\"\"Percent duplicate labels: 0.250\r\nPercent QIIME-incompatible fasta labels: 0.500\r\nPercent of labels that fail to map to SampleIDs: 0.750\r\nPercent of sequences with invalid characters: 0.500\r\nPercent of sequences with barcodes detected: 0.250\r\nPercent of sequences with barcodes detected at the beginning of the sequence: 0.000\r\nPercent of sequences with primers detected: 0.250\r\nDuplicate labels found:\r\nseq1\"\"\".split('\\n')\r\n\r\n self.assertEqual(actual_log_lines, expected_log_lines)", "def _verify_format(s, format):\n r = re.compile(format)\n if r.match(s) is not None:\n return True\n return False", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file conetent is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n raise exception.FileFormatError(err_msg)", "def sniff( self, filename ):\n handle = open(filename)\n line = handle.readline()\n handle.close()\n first = line.split()\n\n if ( len(first) != 6 ):\n return False\n\n if ( first[5] != \"1\" and first[5] != \"0\" ):\n return False\n\n if ( first[2].isalnum() or first[3].isalnum() or first[4].isalnum() ):\n return False\n\n return True", "def _isvalid_file(filename):\r\n thisisavalidfile = True\r\n if (filename[0] == \".\") or (filename[0] == \"_\") or not ((filename.split(\".\")[-1] == \"txt\") or (filename.split(\".\")[-1] == \"csv\")):\r\n thisisavalidfile = False\r\n\r\n return thisisavalidfile", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def _check_is_file(_string: str) -> str:\n if os.path.isfile(_string):\n return _string\n else:\n raise argparse.ArgumentTypeError(\"{0} file does \"\n \"not exists.\".format(_string))", "def check_record(self, mask, fullpath):\r\n if self.is_carved_gzip:\r\n decode_error = False\r\n # Flag conflicts\r\n # These flag combinations can not exist together\r\n type_err = \"FolderEvent\" in mask[0] and \"FileEvent\" in mask[0]\r\n fol_cr_err = \"FolderEvent\" in mask[0] and \"Created\" in mask[1] and \\\r\n \"FolderCreated\" not in mask[1]\r\n fil_cr_err = \"FileEvent\" in mask[0] and \"FolderCreated\" in mask[1]\r\n lnk_err = \"SymbolicLink\" in mask[0] and \"HardLink\" in mask[0]\r\n h_lnk_err = \"HardLink\" not in mask[0] and \"LastHardLink\" in mask[1]\r\n h_lnk_err_2 = \"LastHardLink\" in mask[1] and \";Removed\" not in mask[1]\r\n n_used_err = \"NOT_USED-0x0\" in mask[1]\r\n ver_error = \"ItemCloned\" in mask[1] and self.dls_version == 1\r\n\r\n # If any error exists return false to caller\r\n if type_err or \\\r\n fol_cr_err or \\\r\n fil_cr_err or \\\r\n lnk_err or \\\r\n h_lnk_err or \\\r\n h_lnk_err_2 or \\\r\n n_used_err or \\\r\n decode_error or \\\r\n ver_error:\r\n return False\r\n else:\r\n # Record passed tests and may be valid\r\n # return true so that record is included in output reports\r\n return True\r\n else:\r\n # Return true. 
fsevent file was not identified as being carved\r\n return True", "def can_handle(file_io):\r\n\r\n try:\r\n file_io.seek(0)\r\n parsed = etree.parse(file_io)\r\n except XMLSyntaxError:\r\n # IF etree can't parse it, it's not our file.\r\n return False\r\n can_handle = False\r\n can_handle = DelXMLImporter._is_delicious_format(parsed,\r\n can_handle)\r\n\r\n # make sure we reset the file_io object so that we can use it again\r\n return can_handle", "def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION", "def _check_or_apply_style(file_path, style_config, apply):\n # Ref: https://gist.github.com/oskopek/496c0d96c79fb6a13692657b39d7c709\n with open(file_path, \"r\") as f:\n notebook = nbformat.read(f, as_version=nbformat.NO_CONVERT)\n nbformat.validate(notebook)\n\n changed = False\n for cell in notebook.cells:\n if cell[\"cell_type\"] != \"code\":\n continue\n src = cell[\"source\"]\n lines = src.split(\"\\n\")\n if len(lines) <= 0 or \"# noqa\" in lines[0]:\n continue\n # yapf will puts a `\\n` at the end of each cell, and if this is the\n # only change, cell_changed is still False.\n formatted_src, cell_changed = yapf.yapflib.yapf_api.FormatCode(\n src, style_config=style_config)\n if formatted_src.endswith(\"\\n\"):\n formatted_src = formatted_src[:-1]\n if cell_changed:\n cell[\"source\"] = formatted_src\n changed = True\n\n if apply:\n with open(file_path, \"w\") as f:\n nbformat.write(notebook, f, version=nbformat.NO_CONVERT)\n\n return not changed", "def CheckFile(rules, file_name):\n if VERBOSE:\n print \"Checking: \" + file_name\n\n ret_val = \"\" # We'll collect the error messages in here\n last_include = 0\n try:\n cur_file = open(file_name, \"r\")\n in_if0 = 0\n for line_num in xrange(sys.maxint):\n if line_num - last_include > MAX_UNINTERESTING_LINES:\n break\n\n cur_line = cur_file.readline(MAX_LINE_LENGTH)\n if cur_line == \"\":\n break\n cur_line = cur_line.strip()\n\n # Check to see if we're at / inside a #if 0 block\n if cur_line == '#if 0':\n in_if0 += 1\n continue\n if in_if0 > 0:\n if cur_line.startswith('#if'):\n in_if0 += 1\n elif cur_line == '#endif':\n in_if0 -= 1\n continue\n\n is_include, line_status = CheckLine(rules, cur_line)\n if is_include:\n last_include = line_num\n if line_status is not None:\n if len(line_status) > 0: # Add newline to separate messages.\n line_status += \"\\n\"\n ret_val += line_status\n cur_file.close()\n\n except IOError:\n if VERBOSE:\n print \"Unable to open file: \" + file_name\n cur_file.close()\n\n # Map empty string to None for easier checking.\n if len(ret_val) == 0:\n return None\n return ret_val", "def check_correctness(self):\n\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = gt_file.readlines()\n\n # Check for inequality\n if len(out_lines) != len(gt_lines):\n return 0\n\n # Check for inequality\n for i in range(len(out_lines)):\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n\n return 1", "def isValid(ssToCheck):\n\n # Validate ssToCheck argument.\n if not 
isinstance(ssToCheck, str) or len(ssToCheck) < 2:\n raise ShortStrException('ssToCheck argument must be a string at least two characters long')\n\n if RUNNING_PY_2:\n checksum = zlib.adler32(ssToCheck[:-1].decode('utf-8'))\n else:\n checksum = zlib.adler32(bytes(ssToCheck[:-1], encoding='utf-8'))\n\n return ssToCheck[-1] == GLYPHS[checksum % LEN_GLYPHS] # Make sure last character in ssToCheck is the correct checksum character.", "def test_check_mapping_file_correct_file(self):\r\n\r\n # Use valid data, default parameters\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)\r\n\r\n # With additional parameters added should not change results using\r\n # same valid input data\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n has_barcodes=True,\r\n char_replace=\"A\",\r\n verbose=False,\r\n variable_len_barcodes=True,\r\n disable_primer_check=True,\r\n added_demultiplex_field=None)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)", "def compatible_file(file: Union[BinaryIO, str, Path]) -> bool:\n try:\n fd = open(file, 'rb') if isinstance(file, (str, Path)) else file\n\n offset = fd.tell()\n fd.seek(0)\n result = _bytes_match(fd, ELF_MAGIC) or _bytes_match(fd, ARCHIVE_MAGIC)\n 
fd.seek(offset)\n finally:\n if isinstance(file, (str, Path)):\n fd.close()\n\n return result", "def test_style_guide_applies_to(style_guide_file, filename, expected):\n formatter = mock.create_autospec(base.BaseFormatter, instance=True)\n options = create_options()\n guide = style_guide.StyleGuide(\n options,\n formatter=formatter,\n stats=statistics.Statistics(),\n filename=style_guide_file,\n )\n assert guide.applies_to(filename) is expected", "def test_check_canonical_filenames(self):\n contents = self.read_metadata_contents()\n family_metadata = Metadata.get_family_metadata(contents)\n for font_metadata in family_metadata.fonts:\n canonical_filename = self.create_canonical_filename(font_metadata)\n if canonical_filename != font_metadata.filename:\n self.fail('{} != {}'.format(canonical_filename,\n font_metadata.filename))", "def can_handle(file_io):\r\n if (file_io.closed):\r\n file_io = open(file_io.name)\r\n file_io.seek(0)\r\n soup = BeautifulSoup(file_io)\r\n can_handle = False\r\n gbookmark_doctype = \"DOCTYPE NETSCAPE-Bookmark-file-1\"\r\n can_handle = GBookmarkImporter._is_google_format(soup,\r\n gbookmark_doctype,\r\n can_handle)\r\n\r\n # make sure we reset the file_io object so that we can use it again\r\n file_io.seek(0)\r\n return can_handle", "def assertAssemblesTo(self, formatted, expectedFormatted):\n text = irc.assembleFormattedText(formatted)\n expectedText = irc.assembleFormattedText(expectedFormatted)\n self.assertEqual(\n irc.assembleFormattedText(formatted),\n expectedText,\n \"%r (%r) is not equivalent to %r (%r)\"\n % (text, formatted, expectedText, expectedFormatted),\n )", "def test_parse_fasta_file(self):\r\n\r\n fasta_data = ['>seq1 SAMPLE1', 'AAACGT', '>seq2', 'ACGGT']\r\n\r\n expected_fasta = {'seq1': 'AAACGT', 'seq2': 'ACGGT'}\r\n\r\n expected_order = ['seq1 SAMPLE1', 'seq2']\r\n\r\n actual_fasta, actual_order = parse_fasta_file(fasta_data)\r\n\r\n self.assertEqual(actual_fasta, expected_fasta)\r\n\r\n self.assertEqual(actual_order, expected_order)", "def checkFile(self,selected_file):\n path_holder = pathlib.Path(selected_file)\n if path_holder.exists():\n if path_holder.is_file():\n if path_holder.stat().st_size == 0 or path_holder.stat().st_size is None:\n raise CoreException.FileEmptyError(\"File should not be empty!\")\n return False\n\n if path_holder.is_symlink():\n raise CoreException.FileNotSupportedError(\"Symbolic link not supported\")\n return False\n \n # File Clean if they pass the required identity of file.\n return True", "def test_option_format_file_name():\n\toutput_name = format_file_name(test_file, 'test_output.h5m')\n\tassert (output_name == 'test_output.h5m') == True" ]
[ "0.62929124", "0.6227019", "0.5895204", "0.56863064", "0.56821287", "0.5661317", "0.56600463", "0.5612253", "0.55521023", "0.55268717", "0.5512988", "0.5505371", "0.54996324", "0.5447883", "0.5413568", "0.5376837", "0.5368521", "0.53608316", "0.5308955", "0.52947026", "0.5244932", "0.52348405", "0.52277076", "0.5203777", "0.5201214", "0.51996005", "0.5199461", "0.51975805", "0.5197562", "0.51747024", "0.5163174", "0.51562905", "0.51551044", "0.51337755", "0.51337755", "0.5132085", "0.51260906", "0.51181793", "0.5110208", "0.5092954", "0.50927955", "0.5091844", "0.5090508", "0.50881994", "0.50814444", "0.5076256", "0.507418", "0.50718004", "0.5058798", "0.5057357", "0.5050137", "0.5045066", "0.50449276", "0.5043641", "0.50159585", "0.5011115", "0.500019", "0.49944708", "0.499298", "0.49915308", "0.49912563", "0.49894792", "0.4980965", "0.49779382", "0.49708214", "0.49645406", "0.4961116", "0.49592784", "0.49567854", "0.4948861", "0.4944628", "0.4930846", "0.49259633", "0.49213174", "0.49206102", "0.4917883", "0.49107322", "0.49102867", "0.4907705", "0.49059483", "0.4896976", "0.48951563", "0.48910564", "0.48909375", "0.48859048", "0.48857617", "0.48852155", "0.488438", "0.4882236", "0.4866081", "0.48630106", "0.4857592", "0.48554268", "0.48503274", "0.4833623", "0.48323783", "0.4828119", "0.4814821", "0.48107246", "0.4802489" ]
0.55602217
8
Parse read and quality strings from a FASTQ file with sequencing reads.
def readFastq(filename):
    sequences = []
    qualities = []
    with open(filename) as fh:
        while True:
            fh.readline()  # skip name line
            seq = fh.readline().rstrip()  # read base sequence
            fh.readline()  # skip placeholder line
            qual = fh.readline().rstrip()  # base quality line
            if len(seq) == 0:
                break
            sequences.append(seq)
            qualities.append(qual)
    return sequences, qualities
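A minimal usage sketch for the function above (illustrative only; the file name reads.fastq and the index 0 are assumptions, not part of the record):

seqs, quals = readFastq('reads.fastq')  # assumed example FASTQ file
print(len(seqs), 'reads parsed')
print(seqs[0])   # bases of the first read
print(quals[0])  # matching quality string, same length as the read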
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readFastq(filename):\n\tsequences = []\n\tqualities = []\n\twith open(filename, 'r') as f:\n\t\twhile True: \n\t\t\tf.readline() # skip name line\n\t\t\tseq = f.readline().rstrip()\n\t\t\tf.readline() # skip place holder line \n\t\t\tq = f.readline().rstrip()\n\t\t\tif len(seq) ==0:\n\t\t\t\tbreak \n\t\t\tsequences.append(seq)\n\t\t\tqualities.append(q)\n\treturn sequences, qualities", "def seqs_from_file(filename, exit_on_err=False, return_qual=False):\n # VALIDATE INPUT\n if not isinstance(filename, str):\n msg = 'Filename has to be a string.'\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n if not os.path.exists(filename):\n msg = 'File \"%s\" does not exist.'%filename\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n \n # EXTRACT DATA\n with open_(filename,\"rt\") as f:\n query_seq_segments = []\n seq, name, desc, qual = '', '', '', ''\n add_segment = query_seq_segments.append\n for l in f:\n if len(l.strip()) == 0: continue\n #sys.stderr.write(\"%s\\n\"%line)\n fields=l.strip().split()\n if l.startswith(\">\"):\n # FASTA HEADER FOUND\n if query_seq_segments != []:\n # YIELD SEQUENCE AND RESET\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)\n seq, name, desc = '', '', ''\n del query_seq_segments[:]\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n \n elif l.startswith(\"@\"):\n # FASTQ HEADER FOUND\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n try:\n # EXTRACT FASTQ SEQUENCE\n seq = next(f).strip().split()[0]\n # SKIP SECOND HEADER LINE AND QUALITY SCORES\n l = next(f)\n qual = next(f).strip() # Qualities\n except:\n break\n else:\n # YIELD SEQUENCE AND RESET\n if return_qual:\n yield (seq, qual, name, desc)\n else:\n yield (seq, name, desc)\n seq, name, desc, qual = '', '', '', ''\n \n elif len(fields[0])>0:\n # EXTRACT FASTA SEQUENCE\n add_segment(fields[0])\n \n # CHECK FOR LAST FASTA SEQUENCE\n if query_seq_segments != []:\n # YIELD SEQUENCE\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)", "def _parse_fastq(f):\n header = ''\n seq = ''\n skip = False\n for line in f:\n if skip:\n skip = False\n continue\n line = line.strip()\n if line == '':\n continue\n if line[0] == '@':\n header = line.replace('@', '')\n elif line[0] == '+':\n yield header, seq\n skip = True\n else:\n seq = line.upper()", "def read_seqs(f):\n while True:\n # Read the sequence ID. 
If there's nothing to read, then we're done.\n try:\n seq_id = read_line(f)\n except EOFError:\n return\n\n # If we successfully read a sequence ID, then running out of stuff to\n # read means a truncated record.\n try:\n seq = str_to_byte_array(read_line(f))\n qual_id = read_line(f)\n qual = str_to_byte_array(read_line(f))\n except EOFError:\n raise EOFError('EOF while reading sequence.')\n\n # Some simple checks of the data.\n if seq_id[0] != '@':\n raise ValueError(\"Sequence ID doesn't begin with '@'.\")\n if qual_id[0] != '+':\n raise ValueError(\"Quality ID doesn't begin with '+'.\")\n if len(seq) != len(qual):\n raise ValueError(\"Sequence and quality are different lengths.\")\n\n yield (seq_id, seq, qual_id, qual)", "def process_fastq(fastq_file):\n current_record = {}\n\n for name, seq, blank, quality in zip(*[iter(fastq_file)]*4):\n current_record['name'] = name.strip('\\n')\n current_record['seq'] = seq.strip('\\n')\n current_record['quality'] = quality.strip('\\n')\n\n yield current_record", "def readfq(fp): # this is a generator function\n last = None # this is a buffer keeping the last unprocessed line\n while True: # mimic closure; is it a bad idea?\n if not last: # the first record or a record following a fastq\n for l in fp: # search for the start of the next record\n if l[0] in '>@': # fasta/q header line\n last = l[:-1] # save this line\n break\n if not last: break\n name, seqs, last = last[1:].partition(\" \")[0], [], None\n for l in fp: # read the sequence\n if l[0] in '@+>':\n last = l[:-1]\n break\n seqs.append(l[:-1])\n if not last or last[0] != '+': # this is a fasta record\n yield name, ''.join(seqs), None # yield a fasta record\n if not last: break\n else: # this is a fastq record\n seq, leng, seqs = ''.join(seqs), 0, []\n for l in fp: # read the quality\n seqs.append(l[:-1])\n leng += len(l) - 1\n if leng >= len(seq): # have read enough quality\n last = None\n yield name, seq, ''.join(seqs); # yield a fastq record\n break\n if last: # reach EOF before reading enough quality\n yield name, seq, None # yield a fasta record instead\n break", "def read_fastq(filename, strip_second_header=True):\n\n with open(filename) as fastq:\n line = fastq.readline()\n if not line.startswith(\"@\"):\n raise IOError(\"Not FASTQ format? 
First line didn't start with @\")\n while fastq:\n if line.startswith(\"@\"):\n header = line.rstrip()\n seq = fastq.readline().rstrip()\n second_header = fastq.readline()\n if strip_second_header:\n second_header = \"+\"\n scores = fastq.readline().rstrip()\n yield header, seq, second_header, scores\n elif line == \"\": # EOF\n yield header, seq, second_header, scores\n break\n line = fastq.readline()", "def read(infile):\n if isinstance(infile, str):\n infile = open(infile)\n\n with infile:\n while True:\n cmt = infile.readline().strip()\n seq = infile.readline().strip()\n plus = infile.readline().strip()\n qual = infile.readline().strip()\n\n if not cmt:\n break\n if not cmt.startswith('@') or plus != '+':\n raise ValueError('fastq file <{}> is corrupted'.format(infile.path))\n yield SRecord(cmt=cmt[1:], seq=seq, qual=qual)", "def fastq_reader(fastq):\n group_gen = grouper(fastq, 4)\n for record in group_gen:\n # drop the @ before the name and any text after a whitespace\n name = record[0].split(' ')[0][1:].strip()\n seq = record[1].strip()\n yield name, seq", "def fasta_reader(inp):\n #inp is hard coded as \"Sequence1/2.fasta in this script\".\n with open(inp) as in_file: \n for line in in_file.readlines():\n #Guarantees sequence is pulled from the FASTA file not the title \n if line[0].isalpha():\n seq = line.rstrip()\n return (seq)", "def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences", "def parse_fasta(self, filename):\n id = ''\n desc = ''\n tempseq = []\n try:\n seqfile = open(filename,'r')\n for line in seqfile:\n if line.startswith('>'):\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n if ' ' in line:\n (id, desc) = line[1::].split(' ', 1)\n else:\n id = line[1::].strip()\n desc = ''\n tempseq = []\n elif not line.startswith('>'):\n tempseq.append(line.rstrip())\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n except OSError:\n raise PathError(''.join(['ERROR: cannot open', refseqpath]))", "def read_transcript_data(fn):\n\n def _read_lines(fn):\n # NC_000007.13\tRefSeq\tcDNA_match\t50344265\t50344518\t254\t+\t.\tID=aln58042;Target=NM_001220765.2 1 254 +;gap_count=0;identity=0.0691326;idty=1;num_ident=428;num_mismatch=0;pct_coverage=6.91326;pct_identity_gap=100;pct_identity_ungap=100;score=254\n # NC_000002.11 RefSeq cDNA_match 179671939 179672150 212 - . 
ID=ed951d46-194c-477a-a480-4bc64530c5ba;Target=NM_001267550.2 1 212 +;gap_count=0;identity=0.999991;idty=1;num_ident=109223;num_mismatch=1;pct_coverage=100;pct_identity_gap=99.9991;pct_identity_ungap=99.9991\n line_re = re.compile(\n \"(?P<ref_ac>\\S+)\\s+(?P<origin>\\S+)\\s+(?P<match_type>\\S+)\\s+\"\n \"(?P<g_start>\\d+)\\s+(?P<g_end>\\d+)\\s+(?P<score>\\S+)\\s+\"\n \"(?P<strand>[-+])\\s+\\.\\s+ID=(?P<aln>[^;]+);Target=(?P<tx_ac>\\S+)\"\n \"\\s+(?P<tx_start>\\d+)\\s+(?P<tx_end>\\d+).+?\"\n \"pct_coverage=(?P<pct_coverage>[^;]+);\"\n \"pct_identity_gap=(?P<pct_identity_gap>[^;]+);\"\n \"pct_identity_ungap=(?P<pct_identity_ungap>[^;]+)\"\n )\n fh = io.open(fn, \"rb\")\n while fh.peek(1)[0] == \"#\":\n fh.readline()\n while fh.peek(3)[0:3] != \"###\":\n line = fh.readline()\n try:\n yield line_re.match(line).groupdict()\n except AttributeError:\n raise Exception(\"Failed at\", line)\n raise StopIteration\n def _key(e):\n return (e[\"tx_ac\"], not e[\"ref_ac\"].startswith(\"NC_\"), e[\"ref_ac\"], e[\"aln\"])\n return itertools.groupby(sorted(_read_lines(fn), key=_key), key=_key)", "def read_sequence(filename):\n record = next(SeqIO.parse(filename, \"fasta\"))\n return record.description, str(record.seq)", "def parse_multifasta_file(file, number_of_fastas):\n\n with open(file) as file:\n for i in range(number_of_fastas):\n fasts_seq = ''\n fasta_name = file.readline().strip()[1:]\n end_of_file = False\n end_of_seq = False\n while not end_of_seq and not end_of_file:\n x = file.tell()\n seq = file.readline()\n if not seq:\n end_of_file = True\n elif '>' not in seq:\n fasts_seq = fasts_seq + seq\n else:\n file.seek(x)\n end_of_seq = True\n fasts_seq = re.sub(r'\\n', '', fasts_seq)\n yield fasta_name, fasts_seq", "def FastqIterator(fh):\n def readTotitle(fh, titleChar):\n \"\"\"returns a tuple ([lines before the next title line], next tile line)\n \"\"\"\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith(titleChar):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)\n\n if type(fh) in StringTypes:\n fh = file(fh)\n\n preLines,nextTitleLine =readTotitle(fh,'@')\n\n while nextTitleLine != None:\n seqTitle = nextTitleLine[1:].rstrip()\n preLines,nextTitleLine=readTotitle(fh,'+')\n qualTitle = nextTitleLine[1:].rstrip()\n if len(qualTitle.strip()) > 0 and seqTitle != qualTitle:\n print seqTitle\n print preLines\n print qualTitle\n raise hmmErrors.InvalidFastq, \"Error in parsing: @title sequence entry must be immediately followed by corresponding +title quality entry.\"\n seqLines = preLines\n qualLines = []\n for i in range(len(seqLines)): # Quality characters should be the same length as the sequence\n qualLines.append( fh.readline().strip() )\n\n preLines,nextTitleLine=readTotitle(fh,'@')\n\n yield (seqTitle, ''.join(seqLines), ''.join(qualLines))", "def fasta_parser(filename):\n fasta = {}\n with open(filename) as f:\n contents = f.read()[1:].split('\\n>')\n for section in contents:\n sample = section.split('\\n')\n sample_id = sample[0]\n seq = ''.join(sample[1:]).strip()\n fasta[sample_id] = seq\n return fasta", "def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = 
SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),", "def readFasta(self, fastaFile):\t\n\t\tname, seq = None, []\n\t\tfor line in fastaFile:\n\t\t\tline = line.rstrip()\n\t\t\tif (line.startswith(\">\")):\n\t\t\t\tif name: yield (name, ''.join(seq))\n\t\t\t\tname, seq = line, []\n\t\t\telse:\n\t\t\t\tseq.append(line)\n\t\tif name: yield (name, ''.join(seq))", "def read_fasta(sequence_file :str):\n\n #for gziped files:\n\n if sequence_file.endswith(\".gz\"):\n with gzip.open(sequence_file, \"rt\") as file:\n seqDict = SeqIO.to_dict(SeqIO.parse(file, 'fasta'))\n ident = ident.split(\"|\")[1]\n return seqDict\n\n # for no gziped fasta files:\n else:\n seqRecord = SeqIO.read(sequence_file, \"fasta\")\n sequence = seqRecord.seq\n ident = seqRecord.id\n ident = ident.split(\"|\")[1]\n return ident, sequence", "def test_parse_fasta_file(self):\r\n\r\n fasta_data = ['>seq1 SAMPLE1', 'AAACGT', '>seq2', 'ACGGT']\r\n\r\n expected_fasta = {'seq1': 'AAACGT', 'seq2': 'ACGGT'}\r\n\r\n expected_order = ['seq1 SAMPLE1', 'seq2']\r\n\r\n actual_fasta, actual_order = parse_fasta_file(fasta_data)\r\n\r\n self.assertEqual(actual_fasta, expected_fasta)\r\n\r\n self.assertEqual(actual_order, expected_order)", "def main(fileToCheck, minLength=-1, maxLength=-1):\n\n # Initialise variables.\n lineCount = 1 # The number of the line being examined. Used for displaying error messages.\n protDescription = True # Whether or not we are currently expecting a line starting with >.\n firstLine = True # Whether or not we are currently examining the first line of the file.\n proteinsInFile = {} # A dictionary indexed by the protein description line of the FASTA file.\n # The value of each entry is the correctly formatted protein sequence corresponding to the index.\n\n # Strip off all excess whitespace, and split the string into the individual lines of the file.\n checking = fileToCheck.rstrip()\n checking = checking.lstrip()\n checking = checking.split('\\n')\n for line in checking:\n line = line.rstrip()\n if firstLine:\n # True if we have just started parsing the file string, and haven;t yet examined any lines.\n if line[0] == '>':\n currentProt = line # Record the description line of the protein which is about to have its sequence inspected.\n currentSeq = '' # Initialise the sequence of the protein.\n protDescription = False # We are now expecting a protein sequence, not a protein description.\n firstLine = False\n else:\n # The first line of the file MUST be a protein description line (i.e. start with '>'). If the line was not\n # the beginning of a protein record, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to start with a >, but instead got: \" + line\n return 1, errorMessage\n elif protDescription:\n # This is true only if a line beginning with a '>' is expected.\n if line[0] == '>':\n # Expected a protein description line, and found a protein description line. This means that the entire sequence\n # of the currentProt protein has been found (i.e. we have finished inspecting the sequence of a protein, and\n # have found the protein to be valid). 
Now determine if the length of the sequence is within the user\n # specified bounds.\n if minLength == -1:\n if maxLength == -1:\n # If there are no restrictions on the protein sequence length, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n # If there is no minimum length restriction, and the protein sequence is not longer than the maximum\n # sequence length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) >= minLength:\n if maxLength == -1:\n # If there is no maximum length restriction, and the protein sequence is not shorter than the minimum\n # sequence length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n # If the protein sequence is not shorter than the minimum sequence length permitted and not longer\n # than the maximum length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n currentProt = line # Record the description line of the protein which is about to have its sequence inspected.\n currentSeq = '' # Initialise the sequence of the protein.\n protDescription = False # We are now expecting a protein sequence, not a protein description.\n else:\n # If the line does not begin with a '>', and it is expected to, it is possible that the amino acid sequence\n # is split over multiple lines.\n if line.isalpha():\n # If every character on the line is a letter, then the line contains a valid portion of the sequence.\n # Add the uppercase version of the sequence portion to the sequence currently being recorded.\n currentSeq += line.upper()\n else:\n # If the line did not contain only letters, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to start with a >, but instead got: \" + line\n return 1, errorMessage\n else:\n # If an amino acid sequence is expected.\n if line.isalpha():\n # If the line is all alphabetic characters, write the line out and indicate that we are expecting a\n # protein description line next (i.e. 
one beginning with a '>').\n currentSeq += line.upper()\n protDescription = True\n else:\n # If the line did not contain only letters, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to contain only letters, but instead got: \" + line\n return 2, errorMessage\n\n lineCount += 1\n\n # Catch the final protein from the file, and determine whether it should be recorded.\n if minLength == -1:\n if maxLength == -1:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) >= minLength:\n if maxLength == -1:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n proteinsInFile[currentProt] = currentSeq\n\n if len(proteinsInFile.keys()) < 2:\n # There are too few protein sequences entered\n errorMessage = (\"Not enough unique protein sequences have been entered.\" +\n \" This is possibly caused by not enough sequences of the required minimum and maximum length being provided.\"\n )\n return 3, errorMessage\n elif protDescription:\n # Return an indication that the FASTA file is correctly formatted.\n outputString = ''\n for i in proteinsInFile.keys():\n outputString += i + '\\n' + proteinsInFile[i] + '\\n'\n return 0, outputString[:-1]\n else:\n # The file did not end with a protein sequence.\n errorMessage = \"Reached the end of the file, but no protein sequence found for the final protein.\"\n return 3, errorMessage", "def readFASTA(filename, alpha = None, string_only = False):\n seqlist = []\n seqname = None\n seqinfo = None\n seqdata = []\n fh = open(filename)\n thisline = fh.readline()\n while (thisline):\n if (thisline[0] == '>'): # new sequence\n if (seqname): # take care of the data that is already in the buffer before processing the new sequence\n try:\n if (string_only):\n seqnew = ''.join(seqdata)\n else:\n seqnew = Sequence(seqdata, alpha, seqname, seqinfo)\n seqlist.append(seqnew)\n except RuntimeError as e:\n print(\"Warning: \"+seqname+\" is invalid (ignored): \", e, file=sys.stderr)\n seqinfo = thisline[1:-1] # everything on the defline is \"info\"\n seqname = seqinfo.split()[0] # up to first space\n seqdata = []\n else: # pull out the sequence data\n cleanline = thisline.split()\n for line in cleanline:\n seqdata.extend(tuple(line.strip('*'))) # sometimes a line ends with an asterisk in FASTA files\n thisline = fh.readline()\n\n if (seqname):\n try:\n if (string_only):\n seqnew = ''.join(seqdata)\n else:\n seqnew = Sequence(seqdata, alpha, seqname, seqinfo)\n seqlist.append(seqnew)\n except RuntimeError as e:\n print(\"Warning: \" + seqname + \" is invalid (ignored): \", e, file=sys.stderr)\n else:\n raise RuntimeError(\"No sequences on FASTA format found in this file\")\n fh.close()\n return seqlist", "def read_fasta(fp):\n name, seq = None, []\n for line in fp:\n line = line.rstrip()\n if line.startswith(\">\"):\n if name: yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name: yield (name, ''.join(seq))", "def iseq_to_qseq_fields(line, barcode_in_header,\r\n barcode_length, barcode_qual_c='b'):\r\n record = line.strip().split(':')\r\n rec_0_1, rec_0_2 = record[0].split('_')\r\n rec_4_1, rec_4_23 = record[4].split('#')\r\n rec_4_2, rec_4_3 = rec_4_23.split('/')\r\n if barcode_in_header:\r\n barcode = rec_4_2[:barcode_length]\r\n sequence = record[5]\r\n barcode_qual = barcode_qual_c * barcode_length\r\n sequence_qual = record[6]\r\n else:\r\n barcode = record[5][:barcode_length]\r\n sequence = 
record[5][barcode_length:]\r\n barcode_qual = record[6][:barcode_length]\r\n sequence_qual = record[6][barcode_length:]\r\n return (rec_0_1, rec_0_2, record[1], record[2], record[3],\r\n rec_4_1, rec_4_2, rec_4_3), sequence, sequence_qual,\\\r\n barcode, barcode_qual", "def parse_joined_fastq(path: Path, counts: Mapping[str, int]) -> Generator[SeqRecord, None, None]:\n sequence_id_map = dict()\n\n for record in SeqIO.parse(path, format=\"fastq\"):\n try:\n sequence_id = sequence_id_map[str(record.seq)]\n except KeyError:\n sequence_id = f\"read_len_{len(sequence_id_map) + 1}\"\n sequence_id_map[str(record.seq)] = sequence_id\n\n yield SeqRecord(record.seq, id=sequence_id)\n\n counts[sequence_id] += 1", "def readSeq(seqFile):\n line = seqFile.readline()\n seq1 = line.rstrip()\n line = seqFile.readline()\n seq2 = line.rstrip()\n return (seq1, seq2)", "def parse_fastq (rec_lines):\n data = []\n data.append(rec_lines[0][1:])\n data.append(rec_lines[1])\n data.append(rec_lines[3])\n return data", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def test_make_fasta_rec(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n seq = 'CTGGTC'\r\n qual = map(int, '32 32 32 19 19 19'.split())\r\n self.assertEqual(make_fastq_rec(header, seq, qual),\r\n \"\"\"@E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nCTGGTC\r\n+E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nAAA444\"\"\")", "def parse(record):\n\n #Extract individual parts of the FASTA record\n\n identifier = record.id #The sequence's Id\n sequence = record.seq #The sequence itself\n sequence = sequence.upper() #Turns all the nucleotides to upper case\n\n return identifier, sequence", "def stream_fastq(fqfile):\n\n if fqfile.endswith('.gz'):\n qin = gzip.open(fqfile, 'rb')\n else:\n qin = open(fqfile, 'r')\n\n while True:\n header = qin.readline()\n if not header:\n break\n header = header.strip()\n seqidparts = header.split(' ')\n seqid = seqidparts[0]\n seq = qin.readline()\n seq = seq.strip()\n qualheader = qin.readline()\n qualscores = qin.readline()\n qualscores = qualscores.strip()\n header = header.replace('@', '', 1)\n yield seqid, header, seq, qualscores", "def process_fastq_single_end_read_file(fastq_read_f,\r\n fastq_barcode_f,\r\n barcode_to_sample_id,\r\n store_unassigned=False,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length_fraction=0.75,\r\n rev_comp=False,\r\n rev_comp_barcode=False,\r\n seq_max_N=0,\r\n start_seq_id=0,\r\n filter_bad_illumina_qual_digit=False,\r\n log_f=None,\r\n histogram_f=None,\r\n barcode_correction_fn=None,\r\n max_barcode_errors=1.5,\r\n strict_header_match=True,\r\n phred_to_ascii_f=None):\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n seq_id = start_seq_id\r\n # grab the first lines and then seek back to the beginning of the file\r\n try:\r\n fastq_read_f_line1 = fastq_read_f.readline()\r\n fastq_read_f_line2 = fastq_read_f.readline()\r\n fastq_read_f.seek(0)\r\n except AttributeError:\r\n fastq_read_f_line1 = fastq_read_f[0]\r\n fastq_read_f_line2 = fastq_read_f[1]\r\n\r\n 
post_casava_v180 = is_casava_v180_or_later(fastq_read_f_line1)\r\n if post_casava_v180:\r\n offset = 33\r\n check_header_match_f = check_header_match_180_or_later\r\n else:\r\n offset = 64\r\n check_header_match_f = check_header_match_pre180\r\n\r\n # compute the barcode length, if they are all the same.\r\n # this is useful for selecting a subset of the barcode read\r\n # if it's too long (e.g., for technical reasons on the sequencer)\r\n barcode_lengths = set([len(bc)\r\n for bc, sid in barcode_to_sample_id.items()])\r\n if len(barcode_lengths) == 1:\r\n barcode_length = barcode_lengths.pop()\r\n else:\r\n barcode_length = None\r\n\r\n # compute the minimum read length as a fraction of the length of the input\r\n # read\r\n min_per_read_length = min_per_read_length_fraction * \\\r\n len(fastq_read_f_line2)\r\n\r\n # prep data for logging\r\n input_sequence_count = 0\r\n count_barcode_not_in_map = 0\r\n count_too_short = 0\r\n count_too_many_N = 0\r\n count_bad_illumina_qual_digit = 0\r\n count_barcode_errors_exceed_max = 0\r\n sequence_lengths = []\r\n seqs_per_sample_counts = {}\r\n for bc_data, read_data in izip(\r\n parse_fastq(fastq_barcode_f, strict=False, phred_offset=offset),\r\n parse_fastq(fastq_read_f, strict=False, phred_offset=offset)):\r\n input_sequence_count += 1\r\n # Confirm match between barcode and read headers\r\n if strict_header_match and \\\r\n (not check_header_match_f(bc_data[header_index], read_data[header_index])):\r\n raise FastqParseError(\"Headers of barcode and read do not match. Can't continue. \"\r\n \"Confirm that the barcode fastq and read fastq that you are \"\r\n \"passing match one another.\")\r\n else:\r\n header = read_data[header_index]\r\n\r\n # Grab the barcode sequence\r\n if barcode_length:\r\n # because thirteen cycles are sometimes used for\r\n # techical reasons, this step looks only at the\r\n # first tweleve bases. 
note that the barcode is\r\n # rev-comp'ed after this step if requested since\r\n # the thirteen base is a technical artefact, not\r\n # barcode sequence.\r\n barcode = bc_data[sequence_index][:barcode_length]\r\n else:\r\n barcode = bc_data[sequence_index]\r\n if rev_comp_barcode:\r\n barcode = str(DNA(barcode).rc())\r\n # Grab the read sequence\r\n sequence = read_data[1]\r\n # Grab the read quality\r\n quality = read_data[2]\r\n\r\n # correct the barcode (if applicable) and map to sample id\r\n num_barcode_errors, corrected_barcode, correction_attempted, sample_id = \\\r\n correct_barcode(\r\n barcode,\r\n barcode_to_sample_id,\r\n barcode_correction_fn)\r\n # skip samples with too many errors\r\n if (num_barcode_errors > max_barcode_errors):\r\n count_barcode_errors_exceed_max += 1\r\n continue\r\n\r\n # skip unassignable samples unless otherwise requested\r\n if sample_id is None:\r\n if not store_unassigned:\r\n count_barcode_not_in_map += 1\r\n continue\r\n else:\r\n sample_id = 'Unassigned'\r\n\r\n quality_filter_result, sequence, quality =\\\r\n quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length,\r\n phred_quality_threshold,\r\n min_per_read_length,\r\n seq_max_N,\r\n filter_bad_illumina_qual_digit)\r\n\r\n # process quality result\r\n if quality_filter_result != 0:\r\n # if the quality filter didn't pass record why and\r\n # move on to the next record\r\n if quality_filter_result == 1:\r\n count_too_short += 1\r\n elif quality_filter_result == 2:\r\n count_too_many_N += 1\r\n elif quality_filter_result == 3:\r\n count_bad_illumina_qual_digit += 1\r\n else:\r\n raise ValueError(\r\n \"Unknown quality filter result: %d\" %\r\n quality_filter_result)\r\n continue\r\n\r\n sequence_lengths.append(len(sequence))\r\n\r\n try:\r\n seqs_per_sample_counts[sample_id] += 1\r\n except KeyError:\r\n seqs_per_sample_counts[sample_id] = 1\r\n\r\n if rev_comp:\r\n sequence = str(DNA(sequence).rc())\r\n quality = quality[::-1]\r\n\r\n fasta_header = '%s_%s %s orig_bc=%s new_bc=%s bc_diffs=%d' %\\\r\n (sample_id, seq_id, header, barcode,\r\n corrected_barcode, num_barcode_errors)\r\n yield fasta_header, sequence, quality, seq_id\r\n seq_id += 1\r\n\r\n # Add sample IDs with zero counts to dictionary for logging\r\n for curr_sample_id in barcode_to_sample_id.values():\r\n if curr_sample_id not in seqs_per_sample_counts.keys():\r\n seqs_per_sample_counts[curr_sample_id] = 0\r\n\r\n if log_f is not None:\r\n log_str = format_split_libraries_fastq_log(count_barcode_not_in_map,\r\n count_too_short,\r\n count_too_many_N,\r\n count_bad_illumina_qual_digit,\r\n count_barcode_errors_exceed_max,\r\n input_sequence_count,\r\n sequence_lengths,\r\n seqs_per_sample_counts)\r\n log_f.write(log_str)\r\n\r\n if len(sequence_lengths) and histogram_f is not None:\r\n counts, bin_edges = make_histograms(sequence_lengths)\r\n histogram_str = format_histogram_one_count(counts, bin_edges)\r\n histogram_f.write(histogram_str)\r\n histogram_f.write('\\n--\\n\\n')", "def condolidateReads(options):\n input_filename=options.adapter_trimmed_filename\n output_filename=options.consolidated_filename\n fhw=open(output_filename,\"w\")\n #original_data=readFastqFile(input_filename)\n fhr=open(input_filename,\"r\")\n data={}\n while True:\n line=fhr.readline().strip()\n if not line:\n break\n id=line\n seq=fhr.readline().strip()\n useless=fhr.readline()\n quality=fhr.readline()\n if seq not in data:\n data[seq]=1\n else:\n data[seq]+=1\n for seq_num,seq in enumerate(data):\n 
fhw.write(\">read_\"+str(seq_num+1)+\"_\"+str(data[seq])+\"\\n\"+seq+\"\\n\")\n fhw.close()", "def Parse_Fasta(filename):\n dic = {}\n name = None\n seq = ''\n with open(filename) as F:\n for line in F:\n if line.startswith('>'):\n if name is not None:\n dic[name] = seq\n seq = ''\n name = line.strip()\n else:\n seq += line\n if not name in dic:\n dic[name] = seq\n return dic", "def parse_fasta(io):\n id = None\n sequence = \"\"\n for line in io:\n line = line.strip()\n if line == \"\":\n continue\n if line[0] == \">\":\n if id:\n yield (id, sequence)\n id = line[1:].strip()\n sequence = \"\"\n else:\n sequence += line\n if id:\n yield (id, sequence)", "def test_iseq_to_qseq_fields(self):\r\n i = \"HWI-ST753_50:6:1101:15435:9071#0/1:ACCAGACGATGCTACGGAGGGAGCTAGCGTTGTTCGGAATTACTGGGCGTAAAGCGCACGTAGGCGGCTTTGTAAGTTAGAGGTGAAAGCCTGGAGCTCAAC:gggggggfggdegggggggggggggggggggegggggggggegggggggeggcccccFUZSU_]]^^ggggggdggdgeeeccYacadcbeddceegggeeg\"\r\n # barcode in sequence, barcode length = 12\r\n expected = (\r\n (\"HWI-ST753\", \"50\", \"6\", \"1101\", \"15435\", \"9071\", \"0\", \"1\"),\r\n \"TACGGAGGGAGCTAGCGTTGTTCGGAATTACTGGGCGTAAAGCGCACGTAGGCGGCTTTGTAAGTTAGAGGTGAAAGCCTGGAGCTCAAC\", \"gggggggggggggggggggegggggggggegggggggeggcccccFUZSU_]]^^ggggggdggdgeeeccYacadcbeddceegggeeg\", \"ACCAGACGATGC\", \"gggggggfggde\")\r\n self.assertEqual(\r\n iseq_to_qseq_fields(i, barcode_in_header=False, barcode_length=12),\r\n expected)\r\n # barcode in sequence, barcode length = 6\r\n expected = (\r\n (\"HWI-ST753\", \"50\", \"6\", \"1101\", \"15435\", \"9071\", \"0\", \"1\"),\r\n \"CGATGCTACGGAGGGAGCTAGCGTTGTTCGGAATTACTGGGCGTAAAGCGCACGTAGGCGGCTTTGTAAGTTAGAGGTGAAAGCCTGGAGCTCAAC\", \"gfggdegggggggggggggggggggegggggggggegggggggeggcccccFUZSU_]]^^ggggggdggdgeeeccYacadcbeddceegggeeg\", \"ACCAGA\", \"gggggg\")\r\n self.assertEqual(\r\n iseq_to_qseq_fields(i, barcode_in_header=False, barcode_length=6),\r\n expected)\r\n\r\n # barcode in header, barcode length = 6\r\n i = \"HWI-6X_9267:1:1:4:1699#ACCACCC/1:TACGGAGGGTGCGAGCGTTAATCGCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGAAAAAAAAAAAAAAAAAAAAAAA:abbbbbbbbbb`_`bbbbbb`bb^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaDaabbBBBBBBBBBBBBBBBBBBB\"\r\n expected = ((\"HWI-6X\", \"9267\", \"1\", \"1\", \"4\", \"1699\", \"ACCACCC\", \"1\"),\r\n \"TACGGAGGGTGCGAGCGTTAATCGCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGAAAAAAAAAAAAAAAAAAAAAAA\", \"abbbbbbbbbb`_`bbbbbb`bb^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaDaabbBBBBBBBBBBBBBBBBBBB\", \"ACCACC\", \"bbbbbb\")\r\n self.assertEqual(\r\n iseq_to_qseq_fields(i, barcode_in_header=True, barcode_length=6),\r\n expected)\r\n # barcode in header, barcode length = 3\r\n expected = ((\"HWI-6X\", \"9267\", \"1\", \"1\", \"4\", \"1699\", \"ACCACCC\", \"1\"),\r\n \"TACGGAGGGTGCGAGCGTTAATCGCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGAAAAAAAAAAAAAAAAAAAAAAA\", \"abbbbbbbbbb`_`bbbbbb`bb^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaDaabbBBBBBBBBBBBBBBBBBBB\", \"ACC\", \"bbb\")\r\n self.assertEqual(\r\n iseq_to_qseq_fields(i, barcode_in_header=True, barcode_length=3),\r\n expected)", "def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n 
records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n records.extend(gene)\n\n database.extend(records)", 
"def parseFasta(fh):\n\n record_seq = []\n record_id = None\n\n for line in fh:\n line = line.strip(\"\\n\")\n\n if line.startswith(\">\"):\n\n if record_seq:\n yield Record(record_id, \"\".join(record_seq))\n\n record_id = line[1:].split()[0]\n record_seq = []\n else:\n record_seq.append(line.replace(\"*\", \"-\"))\n\n if record_seq:\n yield Record(record_id, \"\".join(record_seq))", "def parse_text(filehandle: TextIO) -> Iterator[Fasta]:\n\n # Check that the file looks like UniProt text format\n first_line = next(filehandle)\n if not first_line.startswith(\"ID\"):\n raise TextParserError(\n \"Unexpected file format: first line of UniProt text file should start with 'ID'\"\n )\n filehandle.seek(0)\n\n fasta = Fasta(sequence=\"\")\n for line in filehandle:\n key = line[:2] # This is more efficient than using line.startswith\n if key == \"ID\":\n tokens = line.split()\n fasta.entry_name = tokens[1]\n fasta.reviewed = True if tokens[2] == \"Reviewed;\" else False\n elif key == \"AC\":\n if fasta.accession is None:\n accessions = line[5:].rstrip(\";\\n\").split(\"; \")\n fasta.accession = accessions[0]\n elif key == \"DT\":\n if \"sequence version\" in line:\n tokens = line[5:].strip(\".\\n\").split()\n fasta.version = int(tokens[3])\n elif key == \"DE\":\n if \"RecName\" in line:\n fasta.name = _extract_name(line)\n # Get the first SubName if no RecName found\n elif fasta.name is None and line[5:12] == \"SubName\":\n fasta.name = _extract_name(line)\n elif line[5:10] == \"Flags\" and \"Fragment\" in line:\n fasta.fragment = True\n elif key == \"GN\":\n if line[5:10] == \"Name=\":\n tokens = line[10:].split(\";\")\n # Remove evidence tags, if present\n gene_tokens = tokens[0].split(\" {\")\n fasta.gene = gene_tokens[0]\n elif key == \"OS\":\n # TODO: check for multiline species name (excluding brackets)\n if fasta.species is None:\n species_line = line[5:].strip().split(\" (\")\n fasta.species = species_line[0].strip(\".\")\n elif key == \"OX\":\n if \"NCBI_TaxID\" in line:\n tokens = line[5:].strip(\";\\n\").split(\"; \")\n # Remove evidence tag if present\n taxid_tokens = tokens[0][11:].split(\" {\")\n fasta.taxid = taxid_tokens[0]\n elif key == \"PE\":\n fasta.evidence = int(line[5])\n elif key == \" \":\n sequence_line = line.strip().replace(\" \", \"\")\n fasta.sequence += sequence_line\n elif key == \"//\":\n yield fasta\n fasta = Fasta(sequence=\"\")", "def readSequences(lines):\n seqs = []\n label = None\n seq_lines = []\n for line in lines:\n line = line.strip() # strip off white space\n if not line: # skip empty lines\n continue\n if line.startswith(';'): # ignore comment lines\n continue\n # check for start of next sequence:\n if line.startswith('>'): # label line\n # first, store the previous sequence if we had one:\n if seq_lines:\n seqs.append(Sequence(label, ''.join(seq_lines)))\n seq_lines = []\n # get the label (name) for the next sequence\n label = line[1:].strip()\n else:\n # collect all lines with sequence information for this sequence:\n seq_lines.append(line)\n # take care of the last sequence in the file\n seqs.append(Sequence(label, ''.join(seq_lines)))\n return seqs", "def quality_matcher(fasta, full_fastq, filt_fastq, trunclen):\n with open(fasta, \"r\") as fasta, open(full_fastq, \"r\") as fastq, open(filt_fastq, \"w\") as new_fastq:\n #make lists of the fasta and fastq files, where every successive value is a successive line\n #purpose of -1: to avoid the \"\\n\" newline character at the end of the lines\n fastq_list = [line[:-1] for line in fastq]\n fasta_list = 
[line[:-1] for line in fasta]\n #iterate through the sequence ids in the fasta file\n for fasta_index, fasta_id in enumerate(fasta_list):\n if fasta_id[0] == \">\":\n #get the list index of the matching sequence id in the metagenomic fastq file\n fastq_index = fastq_list.index(\"@{}\".format(fasta_id[1:]))\n #print and write a new fastq entry with the quality scores string truncated to the same length as the sequence from the fasta file\n print(str(\"@{}\".format(fasta_id[1:])) + \"\\n\" + str(fasta_list[fasta_index+1]) + \"\\n\" + str(\"+{}\".format(fasta_id[1:])) + \"\\n\" + str(fastq_list[fastq_index+3][:int(trunclen)]))\n new_fastq.write(str(\"@{}\".format(fasta_id[1:])) + \"\\n\" + str(fasta_list[fasta_index+1]) + \"\\n\" + str(\"+{}\".format(fasta_id[1:])) + \"\\n\" + str(fastq_list[fastq_index+3][:int(trunclen)]))", "def read_fasta(fasta_name):\n \n \"\"\"first open the file outside \"\"\"\n file_handler = open(fasta_name)\n\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n fasta_iter = (x[1] for x in groupby(file_handler, lambda line: line[0] == \">\"))\n\n for header in fasta_iter:\n # drop the \">\"\n headerStr = header.__next__()[1:].strip()\n\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in fasta_iter.__next__())\n\n # yield (headerStr, seq)\n result_record = {'header':headerStr,'seqRecord':seq}\n return result_record", "def readFastaFile(filename):\n info={}\n fhr=open(filename,\"r\")\n while(True):\n line=fhr.readline()\n if not line: break\n if(\">\" in line):\n try:\n info[line.strip()[1:].split()[0]]=fhr.readline().strip()\n except ValueError:\n pass\n return info", "def read_fasta(file_path=\"\"):\n\n line = \"\"\n\n try:\n fasta_handle = open(file_path,\"r\")\n except:\n raise IOError(\"Your input FASTA file is not right!\")\n\n # make sure the file is not empty\n while True:\n line = fasta_handle.readline()\n if line == \"\":\n return\n if line[0] == \">\":\n break\n\n # when the file is not empty, we try to load FASTA file\n while True:\n if line[0] != \">\":\n raise ValueError(\"Records in Fasta files should start with '>' character\")\n title = line[1:].rstrip()\n lines = []\n line = fasta_handle.readline()\n while True:\n if not line:\n break\n if line[0] == \">\":\n break\n lines.append(line.rstrip())\n line = fasta_handle.readline()\n\n yield title,\"\".join(lines).replace(\" \",\"\").replace(\"\\r\",\"\")\n\n if not line:\n return\n\n fasta_handle.close()\n assert False, \"Your input FASTA file have format problem.\"", "def readFasta(self, fp):\n\t\t\n\t\tfor head, seq in self.parseFasta(fp):\n\t\t\t#analyzing the sequence\n\t\t\tself.analyzeSequence(seq)\n\t\t\t#saving the header\n\t\t\tif head == '':\n\t\t\t\tcontinue\n\t\t\telse:\t\n\t\t\t\tself.header.append(head)", "def test_iter_fastq(self):\r\n from StringIO import StringIO\r\n fasta = \"\"\">M32Nstr_1 039732_1312_3088 orig_bc=CTCGTGGAGTAG new_bc=CTCGTGGAGTAG bc_diffs=0\r\nCATGCTGCCTCCCGTAGGAGTCTGGGCCGTATCTCAGTCCCAATGTGGCCGGTCACCCTCTCAGGCCGGCTACCCGTCAAAGCCTTGGTAAGCCACTACCCCACCAACAAGCTGATAAGCCGCGAGTCCATCCCCAACCGCCGAAACTTTCCAACCCCCACCCATGCAGCAGGAGCTCCTATCCGGTATTAGCCCCAGTTTCCTGAAGTTATCCCAAAGTCAAGGGCAGGTTACTCACGTGTTACTCACCCGTTCGCCA\r\n>F22Frhd_2 040027_1369_1966 orig_bc=CAAGTGAGAGAG new_bc=CAAGTGAGAGAG 
bc_diffs=0\r\nCATGCTGCCTCCCGTAGGAGTCTGGGCCGTATCTCAGTCCCAATGTGGCCGGTCACCCTCTCAGGCCGGCTACCCGTCAAAGCCTTGGTAAGCCACTACCCCACCAACAAGCTGATAAGCCGCGAGTCCATCCCCAACCGCCGAAACTTTCCAACCCCCACCCATGCAGCAGGAGCTCCTATCCGGTATTAGCCCCAGTTTCCTGAAGTTATCCCAAAGTCAAGGGCAGGTTACTCACGTGTTACTCACCCGTTCGCCA\r\n>F12Labi_3 040135_0934_1957 orig_bc=AGTTAGTGCGTC new_bc=AGTTAGTGCGTC bc_diffs=0\r\nCATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTACTGATCGTTGCCTTGGTGGGCCGTTACCCCGCCAACAAGCTAATCAGACGCATCCCCATCCATAACCGATAAATCTTTATTCGTAATCTCATGAGATCAAACGAATACATAAGGTATTAGTCCAACTTTGCTGGGTTAGTCCCTTACGTTATTGGGCGAGGTTGGATACGCGTTACTCACCCGTGCGCCGGTCGCCG\r\n\"\"\".splitlines()\r\n qual_raw = \"\"\">039695_0364_2008 length=49 uaccno=FFLHOYS01A5986\r\n35 35 35 35 35 35 35 35 35 32 30 30 33 33 35 35 35 35 35 34 34 34 36 36 36 36 36 35 35 36 36 36 36 36 40 37 37 37 37 38 39 38 37 38 36 35 35 35 35\r\n>039732_1312_3088 length=271 uaccno=FFLHOYS01DHI8I\r\n37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37\r\n37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37\r\n37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 38 38 33 33 34 34 36 36 37 37 35 24 19 19 19 38 38 37 37 37\r\n37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 38 38 38 38 38 37 38 38 38 38 38 38 38 37 37 38 38 38 31 31 33 36 33 33 33 36 36 36 36 24 25 25 28 31 36 36 36 36 36 36 36 38\r\n38 38 40 40 38 32 31 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 30 30 30 31 32 32 32\r\n>040027_1369_1966 length=271 uaccno=FFLHOYS01DMIIO\r\n37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37\r\n37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 34 34 34 34 37 37 37 37 37 37\r\n37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 26 26 24 38 32 22 22 15 15 15 15 15 20 16 16 16 38 38 37 37 37\r\n37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 37 38 38 34 34 34 37 37 38 28 28 27 36 33 33 33 36 36 36 36 32 32 32 33 36 36 36 38 37 37 36 37 38\r\n38 38 38 38 38 31 31 32 32 32 32 32 32 32 32 32 32 32 32 31 28 28 28 32 31 31 31 31 32 32 32\r\n>040135_0934_1957 length=281 uaccno=FFLHOYS01CKBO3\r\n33 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 40 40 40 40 38 38 38 39 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 35 35 35 35 35 35 35 35 35 35 35 35 35 28 28\r\n28 28 28 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 33 26 26 26 26 33 35 35 35 35 35\r\n35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 26 26 26 30 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35\r\n35 35 30 30 30 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 27 27 25 15 15 15 18 18 25 15 15 15 15 15 15 14 15 15 15 15 15 15 15 14 15 15 15 15 15 15 23 23 28\r\n28 24 30 31 32 22 22 16 16 16 16 22 22 23 25 21 21 21 21 21 19 21 16 16 16 16 16 22 21 23 25 25 25 21 22 22 22 22 22 22 
22\r\n\"\"\".splitlines()\r\n qual = parse_qual_score(qual_raw)\r\n result = list(iter_fastq(fasta, qual))\r\n self.assertEqual(len(result), 3)\r\n self.assertEqual(result[0][1], 'M32Nstr_1')\r\n self.assertEqual(result[1][1], 'F22Frhd_2')\r\n self.assertEqual(result[2][1], 'F12Labi_3')\r\n\r\n lines = result[0][0].splitlines()\r\n self.assertEqual(lines[1][:5], 'CATGC')\r\n self.assertEqual(lines[3][:5], chr(33 + 37) * 5)\r\n self.assertEqual(\r\n lines[3][-5:], ''.join(map(chr, [33 + 30, 33 + 31, 33 + 32, 33 + 32, 33 + 32])))", "def parse_fasta(f, trim_desc=False):\n \n f = iter(f)\n desc = next(f).strip()[1:]\n if trim_desc:\n desc = desc.split()[0]\n seq = StringIO()\n for line in f:\n line = line.strip()\n if line.startswith(\">\"):\n yield desc, seq.getvalue()\n desc = line[1:]\n if trim_desc:\n desc = desc.split()[0]\n seq = StringIO()\n else:\n seq.write(line.replace(\" \", \"\").replace(\"U\", \"T\"))\n yield desc, seq.getvalue()", "def ReadFASTA(fastafile):\n lines = open(fastafile).readlines()\n headers_seqs = []\n header = None\n seq = []\n for line in lines:\n if line[0] == '>':\n if (not header) and (not seq):\n pass # first sequence in file\n elif header and not seq:\n raise ValueError, \"Empty sequence for %s\" % header\n elif seq and not header:\n raise ValueError, \"File does not begin with header.\"\n else:\n seq = ''.join(seq)\n seq = seq.replace(' ', '')\n headers_seqs.append((header, seq))\n header = line.strip()[1 : ]\n seq = []\n else:\n seq.append(line.strip())\n if (not header) and (not seq):\n pass # first sequence in file\n elif header and not seq:\n raise ValueError, \"Empty sequence for %s\" % header\n elif seq and not header:\n raise ValueError, \"File does not begin with header.\"\n else:\n seq = ''.join(seq)\n seq = seq.replace(' ', '')\n headers_seqs.append((header, seq))\n return headers_seqs", "def parse_dna(genome_file, query):\n\n # Don't extract the text in the beginning\n extract = False\n seq = \"\"\n\n # Read file line by line\n with open(genome_file, 'r') as file:\n for line in file:\n\n # Extract line with the sequence. 
Exclude trailing characters\n if extract:\n seq += str(line.rstrip())\n\n # Stop extraction after new sequence entry (new chromosome)\n if line[0] == '>':\n extract = False\n\n # Start extraction after the query line.\n if line[:6] == query:\n extract = True\n\n return seq", "def test_process_fastq_single_end_read_file_error_on_header_mismatch(self):\r\n fastq_f = [\r\n \"@990:2:4:11272:5533#1/1\",\r\n \"GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC\",\r\n \"+\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"]\r\n barcode_fastq_f = [\r\n \"@990:2:4:11272:5532#1/2\",\r\n \"TTTTTTTTTTTT\",\r\n \"+\",\r\n \"bbbbbbbbbbbb\"]\r\n barcode_to_sample_id = {'AAAAAAAAAAAA': 's1'}\r\n actual = process_fastq_single_end_read_file(\r\n fastq_f,\r\n barcode_fastq_f,\r\n barcode_to_sample_id,\r\n store_unassigned=False,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length_fraction=0.75,\r\n rev_comp=False,\r\n rev_comp_barcode=False,\r\n seq_max_N=0,\r\n start_seq_id=0)\r\n self.assertRaises(FastqParseError, list, actual)", "def parse_fasta(fasta_filename):\n\n sequences = {}\n\n with open(fasta_filename, \"r\") as fasta:\n\n # do our best to accept any input that looks vaguely valid\n for line in fasta:\n \n if line.startswith(\">\"):\n # take everything up to the first space as the id\n # get rid of the leading >\n # and get rid of the newline\n fasta_id = line.split(\" \")[0].replace(\">\", \"\", 1).rstrip('\\n')\n \n seq = []\n wholeseq = ''\n if fasta_id == \"\":\n raise Exceptions.MissingId(\"invalid if there is no fasta_id\")\n \n else:\n seq.append(line.rstrip('\\n'))\n # handle sequences on multiple lines\n wholeseq = \"\".join(seq)\n if len(wholeseq) == 0:\n raise Exceptions.MissingSequence(\"invalid if there is no sequence\")\n sequences[fasta_id] = wholeseq\n\n if len(sequences) == 0:\n raise Exceptions.EmptyFasta(\"invalid if there is nothing in the fasta file\")\n\n return sequences", "def parse_fasta(data):\n name, seq = None, []\n for line in data:\n line = line.rstrip()\n if line.startswith('>'):\n if name:\n yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name:\n yield (name, ''.join(seq))", "def get_read_alignments(sam_f):\n sparser = samparser.SamParser(sam_f=sam_f, aligned_only=True, mapq=20, mismatches=1)\n \n # parse all the hits into this to make sure multi mapping hits map to the same contig\n hit_dict = {}\n ambig_reads = 0\n processed_reads = 0\n for hit in sparser.parse_sam_file():\n processed_reads += 1\n if hit_dict.get(hit['qname'], 0):\n if hit_dict[hit['qname']] != hit['rname']:\n print(\"Warning read: {} aligns to two different contigs\".format(hit['qname']), file=sys.stderr)\n ambig_reads += 1\n else:\n continue\n else:\n hit_dict[hit['qname']] = hit['rname']\n\n print(\"{} of {} processed reads were ambiguous.\".format(ambig_reads, processed_reads))\n\n # condense the hit dict into a contig dict\n contig_dict = {}\n for read, contig in hit_dict.items():\n if contig_dict.get(contig, 0):\n contig_dict[contig].append(read)\n else:\n contig_dict[contig] = [read]\n\n return contig_dict", "def parse_fasta_file(filename):\n if filename.endswith('.gz'):\n opener = lambda filename: gzip.open(filename, 'rt')\n else:\n opener = lambda filename: open(filename, 'r')\n\n with opener(filename) as f:\n fasta_iter = (it[1] for it in itertools.groupby(f, is_header))\n for name in fasta_iter:\n name = name.__next__()[1:].strip()\n sequences = 
''.join(seq.strip() for seq in fasta_iter.__next__())\n yield name, sequences", "def FASTA_iterator(filename):\n fasta_file=open(filename, \"r\")\n id_fasta=\"\"\n seq_fasta=\"\"\n\n for line in fasta_file:\n if line.startswith(\">\"):\n if id_fasta == \"\":\n id_fasta=line.strip()\n continue\n fasta = id_fasta , seq_fasta\n yield fasta\n seq_fasta=\"\"\n id_fasta=line.strip()\n\n else:\n seq_fasta += line.strip()\n\n if seq_fasta != \"\":\n yield id_fasta, seq_fasta", "def read_fasta(name):\n assert os.path.exists(name), name + ' does not exist'\n sequence_lst = []\n header_lst = []\n header = \"\"\n sequence = \"\"\n with open(name, \"rt\") as f_in:\n for line in f_in:\n data = line.strip()\n # jump empty lines\n if not data:\n continue\n # store header and sequence when a new header\n # (i.e. sequence) is found\n if sequence and header and data.startswith(\">\"):\n header_lst.append(header)\n sequence_lst.append(sequence)\n # reset header and sequence\n header = \"\"\n sequence = \"\"\n # save header of sequence\n if data.startswith(\">\"):\n header = data[1:]\n # save sequence\n if \">\" not in data:\n sequence += data\n # save last sequence\n if header and sequence:\n header_lst.append(header)\n sequence_lst.append(sequence)\n # outputs\n assert len(header_lst) == len(sequence_lst), \\\n \"cannot read same number of headers and sequences\"\n print(\"read %d sequences in %s\" % (len(sequence_lst), name))\n if len(sequence_lst) == 0:\n print(\"WARNING: {} seems empty of sequence\".format(name))\n return header_lst, sequence_lst", "def iter_fastq(in_fasta, quals, label_transform=split_lib_transform):\r\n for label, seq in parse_fasta(in_fasta):\r\n new_label, qual_id = label_transform(label)\r\n seq_id = label.split()[0]\r\n if seq_id.startswith('>'):\r\n seq_id = seq_id[1:]\r\n qual = quals[qual_id]\r\n yield make_fastq_rec(new_label, seq, qual), seq_id", "def fasta_reader(fasta):\n # ditch the boolean (x[0]) and just keep the header/seq grouping\n fa_iter = (x[1] for x in itertools.groupby(fasta, lambda line: line[0] == \">\"))\n for header in fa_iter:\n # drop the \">\"\n name = next(header)[1:].strip()\n # join all sequence lines to one by iterating until the next group.\n read = \"\".join(s.strip() for s in next(fa_iter))\n yield name, read", "def fasta_read_generator(file_handler):\r\n seq = []\r\n name = ''\r\n for line in file_handler:\r\n if line[0] == '>':\r\n sequence = ''.join(seq)\r\n if name: # only yield when we already have all data for the first sequence\r\n yield name, sequence\r\n name = line.rstrip()[1:] # omitting the leading >\r\n seq = []\r\n else:\r\n seq += [line]#.rstrip()] # keep line breaks\r\n sequence = ''.join(seq)\r\n yield name, sequence # don't forget the last sequence\r", "def mrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnalen += int(fields[4]) - int(fields[3]) + 1\n accmatch = re.search(r'accession=([^;\\n]+)', fields[8])\n assert accmatch, 'Unable to parse mRNA accession: %s' % fields[8]\n mrnaacc = accmatch.group(1)\n elif entry.startswith('###'):\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'mature mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, 
discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n values = '%s %d %.3f %.3f %.3f' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0", "def to_fastq(self, prefix='', threads=1):\n # Write to uncompressed FASTQ for speed\n fastqs = [\n f'{prefix}_{i+1}.fastq.gz' if prefix else f'{i+1}.fastq.gz'\n for i in range(self.technology.n_files)\n ]\n logger.info(f'Splitting BAM file into FASTQs {\", \".join(fastqs)}')\n logger.warning('All quality scores will be converted to F')\n files = []\n lengths = [0, 0, 0]\n for substring in self.technology.barcode_positions + self.technology.umi_positions:\n lengths[substring.file\n ] = max(lengths[substring.file], substring.stop)\n\n try:\n for fastq in fastqs:\n files.append(open_as_text(fastq, 'w'))\n\n # Count total number only if the bam is local\n parse = urlparse(self.path)\n if not parse.scheme:\n with pysam.AlignmentFile(self.path, 'rb', threads=threads) as f:\n count = f.count(until_eof=True)\n logger.info(f'Detected {count} BAM entries')\n else:\n logger.warning((\n 'Skip counting total BAM entries in remote BAM. '\n 'This means a progress bar can not be displayed.'\n ))\n\n with pysam.AlignmentFile(self.path, 'rb', threads=threads) as f,\\\n tqdm() if parse.scheme else tqdm(total=count) as pbar:\n for item in f.fetch(until_eof=True):\n reads = ['N' * l for l in lengths] # noqa\n barcodes, umis, sequence = BAM.EXTRACT_FUNCTIONS[\n self.technology.name](item) # noqa\n\n # Set sequence.\n reads[self.technology.reads_file.file] = sequence\n\n # Barcode and UMI\n for barcode, substring in zip(\n barcodes, self.technology.barcode_positions):\n bc = reads[substring.file]\n reads[\n substring.file\n ] = f'{bc[:substring.start]}{barcode}{bc[substring.stop:]}'\n for umi, substring in zip(umis,\n self.technology.umi_positions):\n u = reads[substring.file]\n reads[\n substring.file\n ] = f'{u[:substring.start]}{umi}{u[substring.stop:]}'\n\n # Write to each file.\n for file, read in zip(files, reads):\n file.write(f'@{item.query_name}\\n')\n file.write(f'{read.upper()}\\n')\n file.write('+\\n')\n file.write(f'{\"F\" * len(read)}\\n')\n\n pbar.update(1)\n\n finally:\n for file in files:\n file.close()\n\n return fastqs, [\n OrderedTechnology(self.technology, tuple(range(len(fastqs))))\n ]", "def scarf_to_fastq(infile=sys.stdin, outfile=sys.stdout):\n infile = open_gzipped(infile)\n outfile = open_gzipped(outfile, 'wt')\n for line in infile:\n fields = line.rstrip().split(':')\n qual = fields.pop()\n seq = fields.pop()\n outfile.write('{0}\\n{1}\\n+\\n{2}\\n'.format(\n '@' + ':'.join(fields),\n seq,\n qual))", "def bam_to_rec(in_file):\n bam_file = pysam.Samfile(in_file, \"rb\")\n for read in bam_file:\n seq = Seq.Seq(read.seq)\n q = read.qual\n n = read.query_name\n\n fastq_string = \"@%s\\n%s\\n+\\n%s\\n\" % (n, seq, q)\n record = SeqIO.read(StringIO.StringIO(fastq_string), \"fastq-sanger\")\n\n yield record", "def fasta(path):\n label = None\n sequence = None\n with open(path, 'r') as data:\n for line in data:\n line = line.strip()\n if line.startswith('>'):\n if label and sequence:\n yield (label, sequence)\n label = line[1:]\n sequence = \"\"\n else:\n sequence += line\n\n if label and sequence:\n yield (label, sequence)", "def parse_fasta_use_bio(file_name):\n pro_id_list = []\n for seq_record in SeqIO.parse(file_name, \"fasta\"):\n tmp_list = seq_record.id.strip('\\n').split('|')\n 
pro_id_list.append(tmp_list[1])\n # break\n\n return pro_id_list", "def read_fasta_to_dictionary(genome_file):\n filename = genome_file\n dct = {}\n\n id_name = \"\"\n sequence = \"\"\n first_pass = 1\n\n read_fh = open(filename, 'r')\n for i, line in enumerate(read_fh):\n line = line.rstrip()\n if re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r',', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n\n elif re.search(r'^>(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r'(\\d+)_', \"\", id_name)\n id_name = re.sub(r'.*\\|', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n else:\n sequence += line\n dct[id_name] = sequence\n\n return dct", "def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = snp_info[3].split('/')\n\t\t\tsnp_pos = int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif 
all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads", "def parse_match(self, read_id, alignment_position, length, read_sequence, ref_sequence, qualities):\n start = alignment_position\n stop = start + length\n for i in range(start, stop):\n\n self.coverage[i] += 1\n allele = read_sequence[i-alignment_position]\n ref = ref_sequence[i-alignment_position]\n self.base_dictionary[read_id][i] = (allele, qualities[i-alignment_position])\n # self._update_base_dictionary(read_id, i, allele, qualities[i-alignment_position])\n if allele != ref:\n self.mismatch_count[i] += 1\n self._update_read_allele_dictionary(read_id, i, allele, MISMATCH_ALLELE, qualities[i-alignment_position])\n else:\n self.match_count[i] += 1\n # this slows things down a lot. Don't add reference allele to the dictionary if we don't use them\n # self._update_read_allele_dictionary(i, allele, MATCH_ALLELE)", "def fasta(file_path):\n \n print(f\"Parsing fasta '{file_path}'\")\n data = {\n 'ur_up_': [], 'accession': [],\n 'entry_name': [], 'offset': [],\n 'taxonomy': [], 'sequence': []\n }\n\n with open(file_path, 'r') as f:\n for i, line in enumerate(f):\n line = line.strip()\n \n if line[0] == '>':\n key = line[1:]\n \n if i == 0:\n name, offset = key.split(\"/\")\n ur_up_, acc = None, None\n else:\n ur_up_, acc, name_offset = key.split(\"|\")\n name, offset = name_offset.split('/')\n \n data['ur_up_'].append(ur_up_)\n data['accession'].append(acc)\n data['entry_name'].append(name)\n data['offset'].append(offset)\n data['sequence'].append('')\n data['taxonomy'].append(name.split('_')[1])\n else:\n data['sequence'][-1] += line\n \n if i and (i % 50000 == 0):\n print(f\"Reached: {i}\")\n\n return pd.DataFrame(data=data)", "def _parseHelper(fastaInput):\n\n desc, sequence = \"\", []\n\n for rline in fastaInput:\n line = rline.rstrip()\n if not line:\n break\n elif line[0] == \">\":\n if sequence: \n yield desc, ''.join(sequence)\n sequence = []\n desc = line[1:]\n else:\n sequence.append(line)\n\n if desc and sequence:\n yield desc, ''.join(sequence)", "def test_parse_fasta(self):\n filepath = currentdir + '\\\\test_files\\\\mult_query.fasta'\n self.assertTrue(os.path.isfile(filepath))\n queries = parse_fasta(filepath)\n exp = {\n 'contig_1_7184_6765': 'MDIKIHSDFSHANLNEMREVYSSVGWTKHTTKIIKQVFEASNVIALATINGRIIGFGRAISDGVFNAAIYDVVVHRDFQKQGIAKKIMEFLLDQLSHVSCVHLISTTGNEEFYRKLGLKRVKTGMARYLNPELSDEYLE',\n 'contig_1_10704_10537': 'MKGAESASFVLFLFHRSIRRAGNLHLKERANVPKECAKSGTIRKNFAHYFRIRLK',\n 'contig_1_11311_10706': 'MNPSLYHVVYFPLSTGGVMDFYRGLALGLGEEPKYRKVDLFRQIQQAIERLYHERRITPVFILDEMHLAKDAFLQDIAILFNFEMDSTNPFVLILAGLPHLQGKLRLNQHRPLDQRIIMRYRMGPLEKEEVAGYIKHRMKQAGAKHPIFTPSALEAIALQSRGWPRVINTLATTCLLYGYQLKKDAIDEEVVRMAAEEMGY',\n 'contig_1_11505_11365': 'MYKSFYSLSREPFAKETDPSEAYQGAPFQEALRALEYVKRTRGSGC',\n 'contig_1_13448_12804': 
'MIQFHDFDIDVQTYAERGKENDFPLLKKCPHCRAKRPLHRHGYYERNALTPHGDYRIWIVRYRCRECLKTVSVLPSFLLPYFQYTLSAIWQVVKEQLGLTEGTNRAPFLPTKGRHHLLCPAVLPKPIKPSQLFCEAVEDHRPYREKRKGTGFLVDPDVGETRSLFGHQRHVGGRIPTPFCESNGILILHTYPKLEIVEVPTNLSYRRRKSDPVR'\n }\n self.assertTrue(queries == exp)", "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n 
(read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True", "def parse(self):\n\n if self._parse is None:\n\n seqs = [] # list of Weighted Sequences generated by parsing file\n\n with open(self._seqfile, \"r\") as f:\n for i, l in enumerate(f.readlines()):\n try:\n float(l) # try if line is numbers only\n except ValueError:\n seqs.append(WeightSeq(l))\n\n self._parse = seqs\n\n return seqs\n else:\n return \"Sequence file was already parsed\"", "def get_sequin_annots(sequin_path, ref_contigs, quiet=False):\n annots = defaultdict(list)\n \n # We need a dummy class to hold the current state while parsing\n # (otherwise the below private functions can't modify it; there's no \"nonlocal\" in python 2.x)\n class _:\n in_contig = None\n in_feature = None\n gene_name = None\n desc = None\n chrom_start = None\n chrom_end = None\n strand = None\n feature_seq_str = \"\"\n coding_blocks = []\n \n def _save_sequin_feature():\n # The only features we care about are the CDS features. Others get discarded during parsing.\n if _.in_feature == \"CDS\":\n if len(_.feature_seq_str) == 0:\n if not quiet: sys.stderr.write(\"WARN: 0-length CDS in contig %s\" % _.in_contig)\n elif _.gene_name is None or _.strand is None or _.chrom_start is None or _.chrom_end is None:\n if not quiet: sys.stderr.write(\"WARN: invalid CDS feature in contig %s\" % _.in_contig)\n else:\n gene_seq = Seq(_.feature_seq_str, generic_dna)\n if _.strand == '-':\n gene_seq = gene_seq.reverse_complement()\n gene_seq_record = SeqRecord(gene_seq, id=_.gene_name, name=_.gene_name, description=_.desc)\n annot = Annot(_.chrom_start, _.chrom_end, _.strand == '-', gene_seq_record, \n _.coding_blocks)\n annots[contig_to_vcf_chrom(_.in_contig)].append(annot)\n _.in_feature = _.gene_name = _.desc = _.chrom_start = _.chrom_end = _.strand = None\n _.feature_seq_str = \"\"\n _.coding_blocks = []\n \n def _update_sequin_feature(fields):\n if fields[0] != \"\" and fields[1] != \"\":\n # If the first two fields are present, this specifies a sequence range\n if not (fields[0].isdigit() and fields[1].isdigit()):\n # We will only attempt to utilize *complete* CDS features\n # (None of the start or end positions can be qualified by \">\" or \"<\")\n _.in_feature = \"CDS-partial\"\n return\n\n # Append the specified sequence to the `_.feature_seq_str`.\n # Note: Sequin table coordinates, like GenBank, are 1-indexed, right-closed.\n start = int(fields[0])\n end = int(fields[1])\n if _.strand is None: \n _.strand = '+' if start <= end else '-'\n elif _.strand != ('+' if start <= end else '-'):\n sys.stderr.write(\"WARN: strand changed direction, invalid CDS\")\n _.in_feature = \"CDS-partial\"\n return\n if _.strand == '-':\n start, end = end, start\n start -= 1\n ref_contig = ref_contigs[_.in_contig]\n seg = str(ref_contig.seq)[start:end]\n _.coding_blocks.append((start, end))\n _.feature_seq_str = seg + _.feature_seq_str if _.strand == '-' else _.feature_seq_str + seg\n _.chrom_start = min(start, _.chrom_start if _.chrom_start is not None else float('inf'))\n _.chrom_end = max(end, _.chrom_end if _.chrom_end is not None else float('-inf'))\n \n elif len(fields) >= 5:\n # If the first three fields are blank, this specifies a qualifier key + value\n if fields[3] == \"gene\":\n _.gene_name = fields[4]\n elif fields[3] == \"product\":\n _.desc = fields[4]\n \n with open(sequin_path) as f:\n for line in f:\n line = 
line.rstrip(\"\\n\")\n fields = line.split(\"\\t\", 4)\n if len(line.strip()) == 0:\n # Whitespace-only lines signal the end of feature data for a contig.\n # They may be followed by INFO: lines from the annotator, which we ignore.\n _save_sequin_feature()\n _.in_contig = None\n elif _.in_contig is None and line[0] == '>':\n # Lines that begin with \">Feature \" signal the start of feature data for a contig\n # Fields are separated by spaces; the second field is the full contig ID\n _save_sequin_feature()\n sp_fields = line[1:].split(' ')\n if sp_fields[0] == 'Feature' and len(sp_fields) >= 2:\n if ref_contigs.has_key(sp_fields[1]):\n _.in_contig = sp_fields[1]\n elif not quiet:\n sys.stderr.write(\"WARN: unknown contig in Sequin file: %s\" % sp_fields[1])\n elif _.in_contig is not None:\n if len(fields) < 3: \n if not quiet: sys.stderr.write(\"WARN: incomplete Sequin line: %s\" % line)\n next\n in_new_feature = fields[2].strip() != \"\"\n if _.in_feature is None or in_new_feature:\n _save_sequin_feature()\n _.in_feature = fields[2].strip()\n if _.in_feature == \"CDS\":\n _update_sequin_feature(fields)\n elif _.in_feature == \"CDS\":\n _update_sequin_feature(fields)\n \n return annots", "def entrez_fasta_parser(handleFasta):\n fullList = handleFasta.read().split(\"\\n\") \n resL = []\n seqFlag = False\n for fullLine in fullList:\n if fullLine == \"\":\n seqFlag = False\n continue\n elif fullLine[0] == \">\":\n resL.append(fullLine + \"\\n\")\n seqFlag = True\n elif seqFlag:\n resL[-1] += fullLine \n return resL", "def readFastaFile(filename):", "def _read_pyMatch(fn, precursors):\n with open(fn) as handle:\n reads = defaultdict(realign)\n for line in handle:\n query_name, seq, chrom, reference_start, end, mism, add = line.split()\n reference_start = int(reference_start)\n # chrom = handle.getrname(cols[1])\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso = isomir()\n iso.align = line\n iso.start = reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n if len(iso.subs) > 1:\n continue\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def _read_bam(bam_fn, precursors):\n mode = \"r\" if bam_fn.endswith(\"sam\") else \"rb\"\n handle = pysam.Samfile(bam_fn, mode)\n reads = defaultdict(realign)\n for line in handle:\n chrom = handle.getrname(line.reference_id)\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n query_name = line.query_name\n if query_name not in reads:\n reads[query_name].sequence = line.query_sequence\n iso = isomir()\n iso.align = line\n iso.start = line.reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def parse_fasta(idfile):\n seqids = set()\n data = OrderedDict()\n fatalerror = False\n\n with open(idfile, 'r') as f:\n for l in f:\n p=l.strip().split(\"\\t\")\n seqids.add(p[0])\n for meta in re.findall('\\\\[(.*?)\\\\]', p[1]):\n tag,value=meta.split('=')\n if tag not in data:\n data[tag] = {}\n data[tag][p[0]]=value\n\n\n # now we have read the file, lets check a few things out\n \n wantedkeys = ['latitude', 'longitude', 'latlon', 'locality', 'country']\n for 
w in wantedkeys:\n if w not in data:\n sys.stderr.write(\"FILE ERROR: No {} entries were found in {}\\n\".format(w, idfile))\n continue\n for s in seqids:\n if s not in data[w]:\n sys.stderr.write(\"ENTRY ERROR: No {} was found for {}\\n\".format(w, s))\n\n return seqids, data", "def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def premrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnaacc = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n mrnalen = int(fields[4]) - int(fields[3]) + 1\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'pre-mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n mrnaacc = ''\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n elif '\\texon\\t' in entry:\n exoncount += 1\n elif '\\tintron\\t' in entry:\n introncount += 1\n elif '\\tfive_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr5plen += int(fields[4]) - int(fields[3]) + 1\n elif '\\tthree_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr3plen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if mrnaacc != '':\n values = '%s %d %.3f %.3f %.3f %d %d %d %d' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent,\n exoncount, introncount, utr5plen, utr3plen)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n exonlen = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0", "def read_fasta(amplicon_file, minseqlen):\n with gzip.open(amplicon_file) as file:\n sequences = file.readlines()\n seqs = \"\"\n for sequence in sequences:\n #print(\"sequence\")\n seq = sequence.replace(b\"\\n\", b\"\")\n seq = seq.decode('utf8')\n #print(seq)\n for character in seq:\n if character not in \"TGAC\":\n if len(seqs)>=minseqlen:\n yield seqs\n #print(seqs)\n seq = \"\"\n seqs = \"\"\n break\n seqs += seq\n #print(seqs)\n yield seqs", "def get_fastg_seqs_dict(fastg_name, G):\n fp = open(fastg_name, 'r')\n seqs = {}\n for name,seq,qual in readfq(fp):\n name_parts = re.sub('[:,]',\" \", 
name[:-1]).split()\n node = name_parts[0]\n seqs[node] = seq\n return seqs", "def get_seqs(in_fpath):\n f = open(in_fpath, 'r')\n csvread = csv.reader(f)\n seqs = [str(row) for row in csvread]\n seq1 = re.sub('[^A-Z]+', \"\", seqs[0])\n seq2 = re.sub('[^A-Z]+', \"\", seqs[1])\n return seq1, seq2", "def FASTA_iterator (fasta_filename):\n file=open(fasta_filename,\"r\")\n seq=''\n for line in file:\n if line[0]==\">\":\n if seq != \"\":\n yield (lastid,seq)\n seq=''\n lastid=line.rstrip()[1:]\n else:\n lastid=line.rstrip()[1:]\n else:\n seq += line.rstrip()\n if seq != \"\":\n yield (lastid,seq)", "def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list", "def parse_file(file_name, barcode_map=barcode_map):\n\n with open(file_name) as file_handle:\n results = defaultdict(Counter)\n try:\n while True:\n name = file_handle.next()\n seq = file_handle.next()\n plus = file_handle.next()\n qual = file_handle.next()\n handle_seq(seq, barcode_map, results)\n except StopIteration:\n pass\n return pd.DataFrame(results).T.fillna(0)", "def fasta_iter(fh: io.TextIOWrapper) -> dict:\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n faiter = (x[1] for x in groupby(fh, lambda line: line[0] == \">\"))\n for header in faiter:\n # drop the \">\"\n header = next(header)[1:].strip()\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in next(faiter))\n 
yield {\"header\": header, \"seq\": seq}", "def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n name, reads = name_and_reads[0], list(name_and_reads[1])\n reads_copy = copy.deepcopy(reads)\n # Indent sequence strings by starting position.\n for read in reads_copy:\n indent = dc_constants.GAP_OR_PAD * read.alignment.position.position\n read.aligned_sequence = indent + read.aligned_sequence\n indented_cigar_str = indent + struct_utils.get_string_field(\n read.info, 'expanded_cigar')[0]\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n indented_cigar_str)\n yield name, reads_copy", "def __next__(self):\n # ++++ Get Next Four Lines ++++\n elemList = []\n for i in range(4):\n line = self._file.readline()\n self._currentLineNumber += 1 ## increment file position\n if line:\n elemList.append(line.strip('\\n'))\n else: \n elemList.append(None)\n \n # ++++ Check Lines For Expected Form ++++\n trues = [bool(x) for x in elemList].count(True)\n nones = elemList.count(None)\n # -- Check for acceptable end of file --\n if nones == 4:\n raise StopIteration\n # -- Make sure we got 4 full lines of data --\n assert trues == 4,\\\n \"** ERROR: It looks like I encountered a premature EOF or empty line.\\n\\\n Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**\" % (self._currentLineNumber)\n # -- Make sure we are in the correct \"register\" --\n assert elemList[0].startswith(self._hdSyms[0]),\\\n \"** ERROR: The 1st line in fastq element does not start with '%s'.\\n\\\n Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**\" % (self._hdSyms[0],self._currentLineNumber) \n assert elemList[2].startswith(self._hdSyms[1]),\\\n \"** ERROR: The 3rd line in fastq element does not start with '%s'.\\n\\\n Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**\" % (self._hdSyms[1],self._currentLineNumber) \n # -- Make sure the seq line and qual line have equal lengths --\n assert len(elemList[1]) == len(elemList[3]), \"** ERROR: The length of Sequence data and Quality data of the last record aren't equal.\\n\\\n Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**\" % (self._currentLineNumber) \n \n # ++++ Return fatsQ data as tuple ++++\n return tuple(elemList)", "def extract_fastq_info(fastq):\n f = gzip.open(fastq, 'rb')\n header_lines = [x.replace(\"\\n\",\"\") for x in f.readlines(10000) if x.startswith(\"@\")]\n\n for heading in header_lines:\n l = re.split(r'(\\:|#| )',heading)\n line = {}\n index_set = []\n if len(l) == 11:\n line[\"instrument\"] = l[0]\n line[\"flowcell_lane\"] = l[2]\n line[\"flowcell_tile\"] = l[4]\n try:\n line[\"pair\"] = l[10].split(\"/\")[1]\n index_set.append(l[10].split(\"/\")[0])\n except:\n pass\n elif len(l) == 21:\n line[\"instrument\"] = l[0]\n line[\"run_id\"] = l[2]\n line[\"flowcell_id\"] = l[4]\n line[\"flowcell_lane\"] = l[6]\n line[\"flowcell_tile\"] = l[8]\n line[\"pair\"] = l[14]\n line[\"filtered\"] = l[16]\n line[\"control_bits\"] = l[16]\n line[\"index\"] = l[20]\n index_set.append(l[20])\n else:\n print \"error\", l\n line[\"index\"] = most_common(index_set)\n return line", "def read_fasta(src, remove_gaps=False):\n file_obj = None\n if isinstance(src, str):\n try:\n file_obj = open(src, \"r\")\n except IOError:\n print((\"The file `%s` does not exist, exiting gracefully\" % src))\n elif isinstance(src, filetypes):\n file_obj = src\n else:\n raise 
TypeError('FASTA reader cannot recognize the source of %s, %s, %s' % (src,type(src),isinstance(src, filetypes)))\n name = None\n seq_list = list()\n for line_number, i in enumerate(file_obj):\n if i.startswith('>'):\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n seq_list = list()\n name = i[1:].strip()\n else:\n #seq = ''.join(i.strip().upper().split())\n seq = ''.join(i.strip().split())\n #if not is_sequence_legal(seq):\n # raise Exception(\"Error: illegal characeters in sequence at line %d\" % line_number)\n seq_list.append(seq)\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n if isinstance(src, str):\n file_obj.close()", "def read_fasta(fasta_file):\n\n seq_dict = dict() # Declare a new dictionary\n\n with open(fasta_file,'r') as f:\n lines = f.readlines()\n defline = \"\"\n for li in lines:\n li = li.rstrip() # remove newlines\n if '>' in li:\n defline = li # if i use 'id' it is blue; why?\n seq_dict[defline] = \"\"\n else:\n li = li.upper() # just to clean up sequence\n seq_dict[defline] += li\n\n return seq_dict", "def parse_bam():\n global sample_name, header, segmentID, bam\n sys.stderr.write(time.strftime(\"%c\") + \" Busy with parsing bam file...\\n\")\n bam = pysam.AlignmentFile(NanoSV.opts_bam, 'rb')\n if not bam.has_index():\n sys.exit('The bam has no index file')\n header = bam.header\n if 'HD' in header:\n if not header['HD']['SO'] == 'coordinate':\n sys.exit('The bam file is not coordinate sorted')\n if 'RG' in header:\n if type(header['RG']) is list:\n sample_name = header['RG'][0]['SM']\n else:\n sample_name = header['RG']['SM']\n else:\n sample_name = re.sub('(\\.sorted)?\\.bam$', '', str(NanoSV.opts_bam))\n\n for line in bam:\n if line.query_name in reads:\n read = reads[line.query_name]\n else:\n read = r.Read(line.query_name, line.infer_read_length())\n reads[line.query_name] = read\n\n if line.flag & 4 or line.mapping_quality < NanoSV.opts_min_mapq:\n continue\n segment = s.Segment(segmentID, line.query_name, line.flag, line.reference_name, line.reference_start+1, line.mapping_quality,\n line.query_alignment_length)\n segment.end = line.reference_start + line.reference_length\n if line.has_tag('MD'):\n matches = sum(map(int, re.findall(r\"(\\d+)\", line.get_tag('MD'))))\n segment.pid = format(matches / segment.length, '.3f')\n else:\n segment.pid = format(line.get_cigar_stats()[0][7] / segment.length, '.3f')\n if segment.pid == \"0.000\":\n segment.pid = format(line.get_cigar_stats()[0][0] / segment.length, '.3f')\n if line.flag & 16:\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip = line.cigartuples[-1][1]\n else:\n segment.clip = 0\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip_2 = line.cigartuples[0][1]\n else:\n segment.clip_2 = 0\n else:\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip = line.cigartuples[0][1]\n else:\n segment.clip = 0\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip_2 = line.cigartuples[-1][1]\n else:\n segment.clip_2 = 0\n if float(segment.pid) < NanoSV.opts_min_pid:\n continue\n read.addSegment(segment)\n segments[segmentID] = segment\n segmentID += 1", "def read_fasta_file(filename):\n sequences_lines = {}\n current_sequence_lines = None\n with open(filename) as fp:\n for line in fp:\n line = line.strip()\n if line.startswith(';') or not line:\n continue\n if 
line.startswith('>'):\n sequence_name = line.lstrip('>')\n current_sequence_lines = []\n sequences_lines[sequence_name] = current_sequence_lines\n else:\n if current_sequence_lines is not None:\n current_sequence_lines.append(line)\n sequences = {}\n for name, lines in sequences_lines.items():\n sequences[name] = ''.join(lines)\n return sequences", "def processFasta(file,testStr):\n header = \"\"\n seq = \"\"\n with open(file, \"r\") as f:\n for line in f:\n line = line.strip()\n if(line.startswith(\">\")):\n if(len(header) == 0 ):\n #first entry:\n header = line[1:]\n else:\n #this is a new entry\n indexes = getIndexes(seq,testStr)\n if len(indexes) > 0:\n print(\"{} in {}: {} times ({})\".format(testStr, header,len(indexes),indexes))\n seq = \"\"\n header = line[1:]\n else:\n seq +=line\n #processing the final entry\n indexes = getIndexes(seq,testStr)\n if len(indexes) > 0:\n print(\"{} in {}: {} times ({})\".format(testStr, header,len(indexes),indexes))", "def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))", "def fasta_seqs(file_name):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n text = infile.read()\n seqs = text.split('>')\n for seq in seqs:\n try:\n x = seq.split('\\n', 1)\n # sequence will be stored in x[1], and i am removing nextline '\\n' characters that comes with it.\n list.append(x[1].replace('\\n', ''))\n except:\n pass\n return list", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 
'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r" ]
[ "0.72407734", "0.7033901", "0.69433504", "0.67568797", "0.6737888", "0.66820616", "0.65858674", "0.653524", "0.652288", "0.6501695", "0.6497522", "0.64846104", "0.64839154", "0.646596", "0.63651484", "0.63603467", "0.63447005", "0.6319669", "0.63027084", "0.6278591", "0.6254532", "0.623839", "0.62380093", "0.62327605", "0.62280685", "0.62178034", "0.6195038", "0.61865884", "0.61806303", "0.6160223", "0.61473715", "0.61301404", "0.6128179", "0.6128027", "0.61191773", "0.610066", "0.60921293", "0.60759676", "0.6067734", "0.6050724", "0.6043029", "0.60366374", "0.60256284", "0.60104305", "0.6002509", "0.599808", "0.5989526", "0.59520185", "0.59374815", "0.5918492", "0.59007835", "0.5891945", "0.5889705", "0.5885955", "0.58765936", "0.58750516", "0.58731043", "0.5870869", "0.5867474", "0.58655053", "0.5862779", "0.5847614", "0.5834802", "0.5832519", "0.5829585", "0.5818171", "0.5801861", "0.5788589", "0.5785882", "0.57857907", "0.5781571", "0.5776385", "0.5766064", "0.57524645", "0.57521576", "0.57517046", "0.57462114", "0.57418454", "0.5727556", "0.57272", "0.57247657", "0.5704977", "0.56908745", "0.5690245", "0.5683261", "0.5682581", "0.56748897", "0.5667883", "0.5660243", "0.56529", "0.5641476", "0.56400424", "0.5622319", "0.5615467", "0.5612855", "0.560762", "0.5606221", "0.56047", "0.559859", "0.5597675" ]
0.74991745
0
Create a hash map between kmers and reads.
def kmerHashMap(reads, k):\n    kmers_dict = {}\n    # loop through all reads\n    for i in range(len(reads)):\n        # loop over each valid kmer start position in the read\n        for j in range(1+len(reads[i])-k):\n            kmer = reads[i][j:k+j]\n            if kmer in kmers_dict:\n                kmers_dict[kmer].add(i)\n            else:\n                kmers_dict[kmer] = set([i])\n    \n    return kmers_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_kmers_observed(read, k):\n counts = {}\n num_kmers = len(read) - k + 1\n for i in range (num_kmers):\n kmer= read[i:i+k]\n if kmer not in counts:\n counts[kmer] = 0\n counts[kmer] +=1\n return len(counts)", "def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def get_results_for_init(self):\n return dict(init=self.centroids, n_clusters=self.centroids.shape[0])", "def build_map(model: str, n: int, kwc: int) -> Map:\n PKWS.clear()\n fited = cluster(n, model)\n return Map(\n cats=list(map(\"c-{}\".format, range(1, n + 1))),\n kws=list(\n map(\n lambda c: \", \".join(\n map(\n lambda x: x[0],\n count_it(\n Counter(\n chain.from_iterable(\n map(\n lambda ie: model == \"bert\"\n and SS_BERT.get(YS[model][ie[0]], [])\n or model == \"glove\"\n and SS_TFIDF[ie[0]]\n or SS_GLOVE[ie[0]],\n filter(\n lambda ie: ie[1] == c,\n enumerate(fited),\n ),\n ),\n )\n ),\n kwc,\n ),\n )\n ),\n range(n),\n )\n ),\n points=list(\n map(\n lambda y, x_y, x: Point(\n question=y, x=x_y[0], y=x_y[1], catagory=x,\n ),\n YS[model],\n XY[model],\n fited,\n )\n ),\n )", "def dict() -> Dict[str, Pin]:", "def ewriters():\n return dict(_ewriters)", "def hashdict(self):\n return {\n 'pix': super(rmap, self).hashdict(),\n 'map': hashlib.sha1(self.map.view(np.uint8)).hexdigest()\n }", "def new(num_buckets=256):\n aMap=[]", "def createMap(self):\n map = {}\n for rows in xrange(0,(size[1]/50)):\n for columns in xrange(0,(size[0]/50)):\n if rows == (size[1]/50)-1 or rows == 0 or columns== (size[0]/50)-1 or columns==0:\n map.update({(rows,columns):\"block\"})\n elif(rows%3 == 0):\n map.update({(rows,columns):random.choice(map_options)})\n else:\n map.update({(rows,columns):random.choice(map_options[:1])})\n\n self.map = map", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def create_std_map():\n data = extract_data()\n data = data['Students']\n global std_map\n for item in data:\n std = Student(item['Rollno'])\n std.cached_data(item['Name'],item['Gender'],item['Sgpa'] \\\n ,item['Cgpa'],item['Points'],item['Rank'],item['G_rank'])\n std_map[item['Rollno']] = std", "def _calculate_leading_dim_map():\n small_matrixes = [(value, value+64) for value in range(256, 40192+512, 512)]\n large_matrixes = [(value, value+1088) for value in range(1024, 39936+1024, 1024)]\n return dict(small_matrixes + large_matrixes)", "def Dictionary_create(nMarkers, markerSize):\n pass", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def __init__(self):\n self.map = defaultdict(list)", "def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # 
Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts", "def readKerning(self):\n\t\tdata = self._fileSystem.readKerning()\n\t\tif data is None:\n\t\t\treturn\n\t\tkerning = {}\n\t\tfor side1 in data:\n\t\t\tfor side2 in data[side1]:\n\t\t\t\tvalue = data[side1][side2]\n\t\t\t\tkerning[side1, side2] = value\n\t\treturn kerning", "def get_speakers_map(self):\n speakers = {}\n for clu in self:\n speakers[clu] = self[clu].get_speaker()\n return speakers", "def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map", "def construct_ngrams_dict(ngrams_list):\n counts = {}\n\n for t in ngrams_list:\n key = hash_function(t)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n return counts", "def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. 
\\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict", "def finalize_readings(self):\n finalized_readings = {}\n for reading in self.intermediate_readings:\n finalized_readings[reading] = list(\n self.intermediate_readings[reading].values()\n )\n return finalized_readings", "def custom_dictionary(nMarkers, markerSize):\n pass", "def create_data():\n # Locations\n data = {}\n num_vehicles = 20\n depot = 0\n locations = loc1\n demands = popn\n\n num_locations = len(locations)\n dist_matrix = {}\n\n for from_node in range(0,num_locations):\n dist_matrix[from_node] = {}\n\n for to_node in range(0,num_locations):\n dist_matrix[from_node][to_node] = (\n haversine(\n locations[from_node],[to_node])\n #locations[to_node],[from_node])\n \"\"\"\n data[\"distances\"] =dist_matrix\n data[\"num_locations\"] = len(dist_matrix)\n data[\"num_vehicles\"] = 6\n data[\"depot\"] = 0\n data[\"demands\"] = demands\n #data[\"vehicle_capacities\"] = capacities\n data[\"time_per_demand_unit\"] = 0.05\n return data\n \"\"\"\n return [ num_vehicles, depot, locations, dist_matrix]", "def get_cache(self):\n cache = {}\n for i in range(5):\n cache['%02d' % i] = {\n Arrays.OFFSETS: self._get_array_value(Arrays.OFFSETS)[i],\n Arrays.SCALES: self._get_array_value(Arrays.SCALES)[i]\n }\n return cache", "def qiskit_circuit_measurement_map(c: QiskitCircuit) -> Dict[int, int]:\n measurements = [x for x in c.data if x[0].name == 'measure']\n return {\n c.qubits.index(x[1][0]): c.clbits.index(x[2][0])\n for x in measurements\n }", "def new(num_buckets=256):#用空列表初始化字典\n\taMap=[]\n\tfor i in range(num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def shards(self):\n shards_per_node = {}\n for node in self.nodes:\n num_shards = 0\n metrics = self.metrics(node)\n for family in metrics:\n for sample in family.samples:\n if sample.name == \"vectorized_reactor_utilization\":\n num_shards = max(num_shards,\n int(sample.labels[\"shard\"]))\n assert num_shards > 0\n shards_per_node[self.idx(node)] = num_shards\n return shards_per_node", "def get_term_map(self):\n\t\tterm_map = {}\n\t\tfor i, ranking in enumerate(self.get_descriptors(self.top_terms)):\n\t\t\tfor term in ranking:\n\t\t\t\tif not term in term_map:\n\t\t\t\t\tterm_map[term] = [ i ]\n\t\t\t\telse:\n\t\t\t\t\tterm_map[term].append(i)\n\t\treturn term_map", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def build_sample_map(flowcell):\n result = {}\n rows = [(lane, lib[\"name\"]) for lib in flowcell[\"libraries\"] for lane in lib[\"lanes\"]]\n i = 1\n for _, name in sorted(set(rows)):\n if name not in result:\n result[name] = \"S{}\".format(i)\n i += 1\n return 
result", "def _createMap(self):\n width = self.map_size[0] * self.chunk_size\n height = self.map_size[1] * self.chunk_size\n map_array = np.zeros((height, width), dtype=float)\n chunks = {}\n clist = []\n for i in range(0, self.map_size[0]*self.map_size[1]):\n chunks[i+1] = Chunk(self)\n chunk_array = np.asarray(list(chunks.keys()))\n chunk_array.resize(self.map_size[0], self.map_size[1])\n return map_array, chunk_array, chunks", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def __create_d_map(self):\n goal_map = {}\n # collect all goal nodes\n for i, row in enumerate(self.map.get_node_grid()):\n for j, node in enumerate(row):\n if node.borders_tile_of_type(Quarantine):\n goal_map[node.get_name()] = (i, j)\n # calculate distance to closest goal node for each node\n for i, row in enumerate(self.map.get_node_grid()):\n for j, node in enumerate(row):\n distances = [\n abs(i - y) + abs(j - x)\n for node_name, (y, x) in goal_map.items()\n ]\n self.d_map[node.get_name()] = min(distances)", "def _assign_reads( medians, centroids ):\n log.info(\"Assigning subreads reads to the closet amplicon cluster\")\n assignments = {'5p':set(), '3p':set()}\n five_prime, three_prime = centroids\n for read, median in medians.iteritems():\n five_prime_diff = abs(median - five_prime)\n three_prime_diff = abs(median - three_prime)\n if five_prime_diff < three_prime_diff:\n assignments['5p'].add( read )\n else:\n assignments['3p'].add( read )\n return assignments", "def gather_cache(self):\n cache = {\"grains\": {}, \"pillar\": {}}\n if self.grains or self.pillar:\n if self.opts.get(\"minion_data_cache\"):\n minions = self.cache.list(\"minions\")\n if not minions:\n return cache\n for minion in minions:\n total = self.cache.fetch(\"minions/{}\".format(minion), \"data\")\n\n if \"pillar\" in total:\n if self.pillar_keys:\n for key in self.pillar_keys:\n if key in total[\"pillar\"]:\n cache[\"pillar\"][minion][key] = total[\"pillar\"][key]\n else:\n cache[\"pillar\"][minion] = total[\"pillar\"]\n else:\n cache[\"pillar\"][minion] = {}\n\n if \"grains\" in total:\n if self.grain_keys:\n for key in self.grain_keys:\n if key in total[\"grains\"]:\n cache[\"grains\"][minion][key] = total[\"grains\"][key]\n else:\n cache[\"grains\"][minion] = total[\"grains\"]\n else:\n cache[\"grains\"][minion] = {}\n return cache", "def __init__(self):\n self.buckets = collections.defaultdict(list)", "def k_map(self):\n\t\tt1 = time.time()\n\t\tmapping_matrix = [] \n\t\tfor index in self.mapping:\n\t\t\tvector = np.zeros(len(self.unique_char),dtype=float)\n\t\t\tvector[index] = 1.0\n\t\t\tmapping_matrix.append(vector)\n\t\tprint(\"Time creating k map {:.3f} sec\".format(time.time()-t1))\n\t\tself.mapping_matrix = mapping_matrix\n\t\treturn mapping_matrix", "def get_map(self):\n\n self.mp = defaultdict(lambda : ord('x'))\n y, x = 0, 0\n while True:\n cond, output = self.ic()\n\n if cond: break\n # New row of the print out\n if output == 10:\n y += 1\n x = 0\n # Assign the value to the map\n else:\n self.mp[y,x] = output\n x += 1\n \n return self.mp", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def make_stats(mapping):\r\n stats = [\"Clustersize\\t#\"]\r\n counts = 
defaultdict(int)\r\n for key in mapping.keys():\r\n counts[len(mapping[key])] += 1\r\n\r\n keys = sorted(counts.keys())\r\n for key in keys:\r\n stats.append(\"%d:\\t\\t%d\" % (key + 1, counts[key]))\r\n return \"\\n\".join(stats)", "def make_station_dict(self):\n self.station_dict = {}\n\n # interates over stations and puts the amount of connections in the dict\n for station in self.stations:\n length = len(self.stations[station].connections)\n self.station_dict[station] = length\n \n return self.station_dict", "def get_mke_scores():\n _scores = {k:[] for k in time_str_to_time.keys()} \n _scores['all'] = [] # add key for all milwaukee and all time zones\n for zip_ in zip_populations.keys(): \n res = query_all_crimes(zip_=zip_)\n print(f'[PROCESSING] {zip_}')\n crimes = to_df(res)\n create_crime_cat(crimes)\n integrate_weight_to_df(crimes)\n for time_sl in time_str_to_time.keys():\n sub = extract_crimes_by_sl(crimes, time_str_to_time[time_sl]) \n cas = compute_crime_score(sub, zip_) \n _scores[time_sl].append(cas)\n _scores['all'].append(cas)\n return _scores", "def __generate_dict_of_keys_to_classification__(self):\n dict_of_assigned_citations = {}\n # duplicating citation dataset to filter as matches go on meaning\n # it should result in quicker allocation\n # can be removed to reduce memory load at expense of speed\n list_of_unassigned = []\n for key in self.dict_of_keywords:\n list_of_current_key = []\n for citation_instance in self.array_of_citations:\n if key == citation_instance.get_classification():\n list_of_current_key.append(citation_instance)\n if \"Unassigned\" == citation_instance.get_classification():\n list_of_unassigned.append(citation_instance)\n dict_of_assigned_citations[key] = list_of_current_key\n dict_of_assigned_citations[\"Unassigned\"] = list_of_unassigned\n return dict_of_assigned_citations", "def create_station_mapping(station_data):\n station_map = {}\n for data_file in station_data:\n with open(data_file, 'r') as f_in:\n # set up csv reader object - note that we are using DictReader, which\n # takes the first row of the file as a header row for each row's\n # dictionary keys\n weather_reader = csv.DictReader(f_in)\n\n for row in weather_reader:\n station_map[row['station_id']] = row['landmark']\n return station_map", "def fresh_hash(self):\n _h = defaultdict(lambda: 0)\n very_small = 0.000000000001\n for g in self.groups: _h[g] = { \"total\": very_small, \"var_all\": 0 }\n return _h", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def init_meters(*metrics):\n return {m: util.AverageMeter() for m in metrics}", "def compute_metrics(self, episodes):\n metrics = {}\n _, visit_epoch = self.compute_visit_freq_table(episodes)\n metrics['visited_states_in_epoch'] = visit_epoch\n metrics['visited_states_in_history'] = \\\n len(self._visited_states_in_history) / 3 ** (self.n_disks)\n return metrics", "def lineBuilders() :\n return dict(_lineBuilders)", "def getT9dict():\r\n T9dict = {}\r\n all_letters = string.lowercase\r\n T9dict.update(mapkeystoletter(2, all_letters[0:3]))\r\n T9dict.update(mapkeystoletter(3, all_letters[3:6]))\r\n 
T9dict.update(mapkeystoletter(4, all_letters[6:9]))\r\n T9dict.update(mapkeystoletter(5, all_letters[9:12]))\r\n T9dict.update(mapkeystoletter(6, all_letters[12:15]))\r\n T9dict.update(mapkeystoletter(7, all_letters[15:19]))\r\n T9dict.update(mapkeystoletter(8, all_letters[19:22]))\r\n T9dict.update(mapkeystoletter(9, all_letters[22:26]))\r\n T9dict[' '] = 0\r\n\r\n return T9dict", "def __init__(self):\n self.mapr = OrderedDict()\n self.counter = 0", "def __init__(self):\n self.hashmap = {}", "def __init__(self):\n self.hashmap = {}", "def metrics(self):\n return {**self.prepend_name_dict(self._prefixes[0], self._train_metrics),\n **self.prepend_name_dict(self._prefixes[1], self.validator.metrics)}", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def new(num_buckets=256):\n\taMap = [] #creating empty list aMap\n\tfor i in range(0, num_buckets):\n\t\taMap.append([]) #append num_buckets into aMap\n\treturn aMap", "def getKmers(self):\n return self.kmers", "def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes", "def create_average_feed_map(self,maps):\n\n avg_map = {'map':np.zeros((self.Ny,self.Nx)),\n 'cov':np.zeros((self.Ny,self.Nx))}\n \n nchans = maps.shape[-1]\n for i,(ifeed,feed) in enumerate(zip(self.feedlist,self.feeds)):\n if feed == 20:\n continue\n for ichan in range(nchans):\n tmp = maps[i,ichan]['map']/maps[i,ichan]['cov']\n cov = 1./maps[i,ichan]['cov']\n gd = np.isfinite(tmp)\n avg_map['map'][gd] += tmp[gd]\n avg_map['cov'][gd] += cov[gd]\n \n avg_map = self.average_maps(avg_map)\n return avg_map", "def processReadings(self, readings):\r\n return {key:value for key, value in readings.items() if not set(key).issubset(self.symbols)}", "def make_global_state(self, shreds_tags):\n doc_counts = collections.defaultdict(int)\n\n for doc, tags in shreds_tags.items():\n for tag in tags:\n doc_counts[tag] += 1\n\n num_docs = float(len(shreds_tags))\n\n idf = {}\n for tag, count in doc_counts.items():\n idf[tag] = math.log(num_docs / count)\n return {\n 'idf_map': idf,\n 'all_terms': sorted(idf.keys()),\n }", "def makeValMap(self,value = 'readcount'):\n self.valMap = np.zeros(len(self))\n self.valMap = self.valMap-1\n myTmp = []\n for x in range(0,len(self)):\n myTmp.append([])\n for i in self.children:\n for j in range(i.start,i.end+1):\n myTmp[j-self.start].append(i.__dict__[value])\n for nt in range(0,len(myTmp)):\n if len(myTmp[nt])>0:\n self.valMap[nt]=sum(myTmp[nt])/len(myTmp[nt])", "def make_dict(unused_s, unused_l, toks):\n result = {}\n key_value_pairs = chunks(toks, 2)\n for key_value_pair in key_value_pairs:\n result[key_value_pair[0]] = key_value_pair[1]\n return result", "def _create_ligand_smiles_dict(self) -> None:\n import json\n\n import pandas as pd\n\n from ..databases.pdb import smiles_from_pdb\n from ..utils import LocalFileStorage\n\n logging.debug(\"Reading 
available KLIFS structures from cache ...\")\n klifs_structures = pd.read_csv(LocalFileStorage.klifs_structure_db(self.cache_dir))\n\n logging.debug(\"Retrieving SMILES for orthosteric ligands ...\")\n pdb_to_smiles = smiles_from_pdb(set(klifs_structures[\"ligand.expo_id\"]))\n\n logging.debug(\"Saving local PDB SMILES dictionary ...\")\n with open(LocalFileStorage.pdb_smiles_json(self.cache_dir), \"w\") as wf:\n json.dump(pdb_to_smiles, wf)\n\n return", "def ranks_to_metrics_dict(ranks):\n mean_rank = np.mean(ranks)\n mean_reciprocal_rank = np.mean(1. / ranks)\n hits_at = {}\n for k in (1, 3, 10):\n hits_at[k] = np.mean(ranks <= k)*100\n return {\n 'MR': mean_rank,\n 'MRR': mean_reciprocal_rank,\n 'hits@[1,3,10]': hits_at\n }", "def assignment(self, addresses, centroids, k):\n newClusters = {}\n print centroids\n for (lat, long) in addresses:\n minDistance = float('Inf')\n minIndex = 0\n for i in range(k):\n if pow(self.euclideanDistance((lat, long), centroids[i]),2) < minDistance:\n minDistance = pow(self.euclideanDistance((lat, long), centroids[i]),2)\n minIndex = i\n if minIndex in newClusters:\n newClusters[minIndex].append((lat, long))\n else:\n newClusters[minIndex] = [(lat, long)]\n return newClusters", "def custom_dictionary_from(nMarkers, markerSize, baseDictionary):\n pass", "def create_n_1_gram_map(self) -> Dict[str, List[str]]:\n assert self.count_map is not None, 'count map is not initialized'\n # assert self.n_grams > 1, 'n-grams must be greater than 1 in order to create n_1 gram map'\n\n res: Dict[str, List[str]] = {}\n for sequence in self.model:\n sequence: str = cast(str, sequence)\n n_minus_1_grams = self.get_n_minus_1_grams(sequence)\n if n_minus_1_grams not in res:\n res[n_minus_1_grams] = []\n res[n_minus_1_grams].append(sequence)\n\n self.n_1_gram_map = res\n return res", "def detectron_weight_mapping(self):\n detectron_weight_mapping = {\n 'block.0.weight': 'blockConv1_w',\n 'block.0.bias': 'blockConv2_b',\n 'block.1.weight': 'blockBN1_w',\n 'block.1.running_mean': 'blockBN1_rm',\n 'block.1.running_var': 'blockBN1_rv',\n 'block.1.bias': 'blockBN1_b',\n 'block.3.weight': 'blockConv2_w',\n 'block.3.bias': 'blockConv2_b',\n 'block.4.weight': 'blockBN2_w',\n 'block.4.bias': 'blockBN2_b',\n 'block.4.running_mean': 'blockBN4_rm',\n 'block.4.running_var': 'blockBN4_rv',\n }\n orphan_in_detectron = []\n self.mapping_to_detectron = detectron_weight_mapping\n self.orphans_in_detectron = orphan_in_detectron\n return self.mapping_to_detectron, self.orphans_in_detectron", "def map_clusters(labels, rows):\r\n counts = Counter(labels)\r\n mappings = {c + 1: ((counts[c] / rows) * 100) for c in sorted(counts)}\r\n\r\n return mappings", "def get_observations(asteroid_map: str) -> Dict[Asteroid, List[Asteroid]]:\n # initialize asteroid map\n asteroids = intialize_asteroid_map(asteroid_map)\n all_observations = {}\n for asteroid_1 in asteroids:\n asteroid_1_observations = {}\n for asteroid_2 in asteroids:\n if asteroid_1 == asteroid_2:\n continue\n angle = calculate_angle(asteroid_1, asteroid_2)\n if angle in asteroid_1_observations:\n asteroid_1_observations[angle].append(asteroid_2)\n else:\n asteroid_1_observations[angle] = [asteroid_2]\n all_observations[asteroid_1] = asteroid_1_observations\n return all_observations", "def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = 
dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord", "def create_map():\n pass\n # for line in range(0, shared.lines):\n # map_data[line][0] = (1, -1)\n # map_data[line][shared.columns - 1] = (1, -1)\n #\n # for column in range(0, shared.columns):\n # map_data[0, column] = (-1, 1)\n # # if column <= shared.left_space or column > shared.columns - shared.left_space:\n # map_data[shared.lines - 1, column] = (-1, 1)", "def _get_markings(self):\n try:\n # First of all, we need to create the client that will send the requests\n # to the simulator. Here we'll assume the simulator is accepting\n\n host = rospy.get_param('/carla/host', 'localhost')\n port = rospy.get_param('/carla/port', 2000)\n\n client = carla.Client(host, port)\n client.set_timeout(2.0)\n\n # Once we have a client we can retrieve the world that is currently\n # running.\n world = client.get_world()\n markings = {}\n marks = world.get_map().get_all_landmarks()\n for mark in marks:\n if mark.name in markings:\n markings[mark.name].add(\n LandMarkPoint(mark.transform.location.x, -mark.transform.location.y,\n mark.transform.rotation.yaw,\n int(mark.id)))\n else:\n markings[mark.name] = set()\n markings[mark.name].add(\n LandMarkPoint(mark.transform.location.x, -mark.transform.location.y,\n mark.transform.rotation.yaw,\n int(mark.id)))\n return markings\n finally:\n pass", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = one_way_skar(d, source, target, others)\n return uniques", "def map_reads_2genes(self, reads_file):\n start1 = time()\n read_starts = self.__get_reads_pos(reads_file)\n start2 = time()\n times = 0\n for ref_gene in self.ref_genes:\n times += 1\n if times % 500 == 0:\n print 'calculated %d genes read count ...' % times\n if len(read_starts[ref_gene.chrom]) == 0:\n continue\n starts = read_starts[ref_gene.chrom]\n for es, ed in zip(ref_gene.exon_starts, ref_gene.exon_ends):\n # rd = starts[(starts > es) & (starts < ed)].size\n rd = cal_read_count(es, ed, starts)\n ref_gene.read_count += rd\n\n print 'start calculate rpkm ...'\n mapped_read_count = self.mapped_read_count\n for ref_gene in self.ref_genes:\n # calculate RPKM\n ref_gene.read_density = \\\n ref_gene.read_count * 1000 * 1000 * 1000. 
/ (ref_gene.mRNA_length * mapped_read_count)\n print 'got reads time: %f' % (time() - start1)\n print 'map reads time: %f' % (time() - start2)", "def calcRs(distances):\n r = {}\n \n for key in distances.keys():\n summedDistances = 0\n for subkey in distances[key].keys():\n summedDistances += distances[key][subkey]\n r[key] = summedDistances/(len(distances.keys())-2)\n\n return r", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = one_way_skar(d, target, source, others)\n return uniques", "def get_hash_map(init_addr):\n addr = init_addr\n hash_map = []\n for i in range(0, len(WIN_HASH), 2):\n pair = WIN_HASH[i:i+2]\n hash_map.append((addr, pair[1]))\n hash_map.append((addr+1, pair[0]))\n addr += 8\n\n return hash_map", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = two_way_skar(d, [source, target], others)\n return uniques", "def getSHSIDDict():\n m = {}\n fin = open(\"SHSDataset/Chromas/msd_keys_mapping.cly\")\n for l in fin.readlines():\n l = l.rstrip()\n f = l.split(\",\")\n m[f[0]] = int(f[1])\n fin.close()\n return m", "def create_maps(self,data,tod,mjd,coords):\n features = np.log10(self.getFeatures(data))/np.log10(2)\n special_idx = np.where((features==16))[0]\n # This is for getting the stare data on more recent\n # calibration observations.\n point_data = self.get_point_data(data,special_idx)\n \n cel_maps = self.create_single_map(tod,\n coords['ra'],\n coords['dec'],\n self.source_positions['ra'][coords['sky_data_flag']],\n self.source_positions['dec'][coords['sky_data_flag']])\n az_maps = self.create_single_map(tod,\n coords['az'],\n coords['el'],\n self.source_positions['az'][coords['sky_data_flag']],\n self.source_positions['el'][coords['sky_data_flag']])\n cel_maps= self.average_maps(cel_maps)\n az_maps = self.average_maps(az_maps)\n xygrid = np.meshgrid((np.arange(self.Nx)+0.5)*self.dx - self.Nx*self.dx/2.,\n (np.arange(self.Ny)+0.5)*self.dy - self.Ny*self.dy/2.)\n \n \n cel_maps['xygrid']=xygrid\n cel_maps['StareCoords']= {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n az_maps['xygrid']=xygrid\n az_maps['StareCoords'] = {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n return cel_maps,az_maps", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n return {\n \"topic\": [\n self.from_text(),\n ],\n }", "def build_article_map(f='./wikispeedia_paths-and-graph/articles.tsv'):\n out_dict = {}\n count = 0\n with open(f, 'r') as r:\n for _ in xrange(12):\n next(r)\n for line in r:\n out_dict[line.strip('\\n')] = count\n count += 1\n return out_dict", "def get_spin_link_dict(peaklist):\n spin_link_dict = {}\n for peak in peaklist:\n spins = [spin for spin in peak\n if spin.atom is not None and spin.atom[0] == 'H']\n if len(spins) != 2:\n err = ('expected 2 Hydrogens in each peak, '\n 'found %d' % len(spins))\n raise ValueError(err)\n link = frozenset(spin.assignment for spin in spins)\n spin_link_dict.setdefault(link, []).append(peak)\n return spin_link_dict", "def mapping_to_index(self) -> Dict[int, int]:\n if not self._atom_mappings:\n self._atom_mappings = {\n atom.GetAtomMapNum(): atom.GetIdx()\n for atom in self.rd_mol.GetAtoms()\n if atom.GetAtomMapNum()\n }\n return self._atom_mappings", "def _read_landmarks(self):\n landmarks = {}\n with 
open(self.landmark_file_path, 'r') as f:\n for line in f.readlines():\n sp = line.split()\n key = sp[0][sp[0].rfind('/')+1:]\n landmarks[key] = [[int(sp[i]), int(sp[i+1])] for i in range(1, 11, 2)]\n\n return landmarks", "def _create_dictionary_of_ned_d(\n self):\n self.log.debug(\n 'starting the ``_create_dictionary_of_ned_d`` method')\n\n count = 0\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n totalRows = sum(1 for row in csvReader)\n csvFile.close()\n totalCount = totalRows\n\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n theseKeys = []\n dictList = []\n for row in csvReader:\n if len(theseKeys) == 0:\n totalRows -= 1\n if \"Exclusion Code\" in row and \"Hubble const.\" in row:\n for i in row:\n if i == \"redshift (z)\":\n theseKeys.append(\"redshift\")\n elif i == \"Hubble const.\":\n theseKeys.append(\"hubble_const\")\n elif i == \"G\":\n theseKeys.append(\"galaxy_index_id\")\n elif i == \"err\":\n theseKeys.append(\"dist_mod_err\")\n elif i == \"D (Mpc)\":\n theseKeys.append(\"dist_mpc\")\n elif i == \"Date (Yr. - 1980)\":\n theseKeys.append(\"ref_date\")\n elif i == \"REFCODE\":\n theseKeys.append(\"ref\")\n elif i == \"Exclusion Code\":\n theseKeys.append(\"dist_in_ned_flag\")\n elif i == \"Adopted LMC modulus\":\n theseKeys.append(\"lmc_mod\")\n elif i == \"m-M\":\n theseKeys.append(\"dist_mod\")\n elif i == \"Notes\":\n theseKeys.append(\"notes\")\n elif i == \"SN ID\":\n theseKeys.append(\"dist_derived_from_sn\")\n elif i == \"method\":\n theseKeys.append(\"dist_method\")\n elif i == \"Galaxy ID\":\n theseKeys.append(\"primary_ned_id\")\n elif i == \"D\":\n theseKeys.append(\"dist_index_id\")\n else:\n theseKeys.append(i)\n continue\n\n if len(theseKeys):\n count += 1\n if count > 1:\n # Cursor up one line and clear line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n if count > totalCount:\n count = totalCount\n percent = (float(count) / float(totalCount)) * 100.\n print \"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory\" % locals()\n rowDict = {}\n for t, r in zip(theseKeys, row):\n rowDict[t] = r\n if t == \"ref_date\":\n try:\n rowDict[t] = int(r) + 1980\n except:\n rowDict[t] = None\n\n if rowDict[\"dist_index_id\"] != \"999999\":\n dictList.append(rowDict)\n\n csvFile.close()\n\n self.log.debug(\n 'completed the ``_create_dictionary_of_ned_d`` method')\n return dictList", "def l_kl_map(self) -> l_to_kl_mapper:\n return self._l_kl_map", "def create_map(filename: str) -> TravelMap:\n travel_map = {}\n for line in open(filename, \"r\"):\n loc1, loc2, dist = parse_line(line)\n add_locs(travel_map, loc1, loc2, dist)\n add_locs(travel_map, loc2, loc1, dist)\n return travel_map", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node", "def _makehash():\n return defaultdict(_makehash)", "def __init__ (self):\n self.lengths = {}\n self.lower_counts = {}\n self.upper_counts = {}\n self.digit_counts = {}\n 
self.symbol_counts = {}\n self.class_counts = {}\n self.word_counts = {}", "def __init__(self):\n self.hash_map = {}", "def gen_dict():\n lines = [line for line in csv.reader(open(__ppath__ + \"/data/occupations.csv\"))] # uses a csv.reader to parse the file, converts the generic iterable to a list\n lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and \"Total\" row, re-expresses as a list of tuples to enable dictionary conversion\n lines.append((\"Unemployed\",0.2)) # accounts for missing 0.2% of jobs\n return dict(lines) # converts to dictionary", "def _calculate_medians( locations ):\n return {k: (l[0]+l[1])/2 for k, l in locations.iteritems()}", "def dictionary_of_metrics(items):\n \n n = len(items)\n average = round(np.mean(items), 2)\n median = round(np.median(items), 2)\n variance = round((sum((items-np.mean(items))**2))/(n-1), 2)\n standard_dev = round(((sum((items-np.mean(items))**2))/(n-1))**(1/2), 2)\n minimum = round(min(items), 2)\n maximum = round(max(items), 2)\n \n return {'mean':average,'median':median,'var':variance,'std':standard_dev,'min':minimum,'max':maximum}\n pass" ]
[ "0.59770435", "0.5825306", "0.5625832", "0.5522428", "0.5472376", "0.5450063", "0.54420954", "0.5432603", "0.5386902", "0.5372055", "0.5325172", "0.5304393", "0.53000337", "0.5292728", "0.5285071", "0.5272437", "0.52629334", "0.5260887", "0.5257762", "0.5255245", "0.5242287", "0.52381957", "0.52374804", "0.52265465", "0.5225795", "0.52172726", "0.5212587", "0.521168", "0.5209874", "0.520263", "0.5197377", "0.5190054", "0.5164025", "0.51628447", "0.5162073", "0.515947", "0.5156636", "0.5132924", "0.5127329", "0.51199853", "0.51147014", "0.5096601", "0.5082816", "0.5073628", "0.506734", "0.5067211", "0.50618774", "0.50592303", "0.50589734", "0.5051897", "0.50443035", "0.5034531", "0.50314677", "0.50294733", "0.50294733", "0.502945", "0.50274515", "0.5025029", "0.5023636", "0.5021872", "0.5015989", "0.50130874", "0.50043607", "0.500034", "0.49957752", "0.49918914", "0.49900967", "0.4986018", "0.49848223", "0.4978934", "0.49753663", "0.49732265", "0.49720338", "0.49665853", "0.49587855", "0.4954842", "0.4951196", "0.49498567", "0.494353", "0.4942207", "0.49381652", "0.49301913", "0.49293128", "0.49286482", "0.492466", "0.49243277", "0.49227718", "0.4922427", "0.49208108", "0.49106213", "0.4907467", "0.48974994", "0.48960552", "0.48949692", "0.4890972", "0.4888272", "0.48865026", "0.48857486", "0.48820597", "0.48747236" ]
0.7121498
0
Loads MNIST files into 3D numpy arrays
def load_mnist(dataset="training", digits=np.arange(10), path="."):
    if dataset == "training":
        fname_img = os.path.join(path, 'train-images-idx3-ubyte')
        fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')
    elif dataset == "testing":
        fname_img = os.path.join(path, 't10k-images-idx3-ubyte')
        fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')
    else:
        raise ValueError("dataset must be 'testing' or 'training'")

    flbl = open(fname_lbl, 'rb')
    magic_nr, size = struct.unpack(">II", flbl.read(8))
    lbl = pyarray("b", flbl.read())
    flbl.close()

    fimg = open(fname_img, 'rb')
    magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
    img = pyarray("B", fimg.read())
    fimg.close()

    ind = [ k for k in range(size) if lbl[k] in digits ]
    N = len(ind)

    images = zeros((N, rows, cols), dtype=uint8)
    labels = zeros((N, 1), dtype=int8)
    for i in range(len(ind)):
        images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))
        labels[i] = lbl[ind[i]]

    return images, labels
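A minimal usage sketch for the function above (added for illustration, not part of the original row): the snippet's bare names pyarray, zeros, array, uint8 and int8 only resolve if the imports below are in scope, and the "data" directory holding the MNIST idx files is an assumption.

# Hypothetical driver for load_mnist(); the import aliases are assumptions chosen
# so that pyarray, zeros, array, uint8 and int8 used inside the function resolve.
import os
import struct
import numpy as np
from array import array as pyarray
from numpy import zeros, array, uint8, int8

images, labels = load_mnist(dataset="training", digits=np.arange(10), path="data")
print(images.shape, labels.shape)  # expected: (60000, 28, 28) (60000, 1)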
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_mnist(kind='train'):\r\n with open('%s-labels.idx1-ubyte' % kind, 'rb') as lbpath:\r\n magic, n = struct.unpack('>II', lbpath.read(8))\r\n labels = np.fromfile(lbpath, dtype=np.uint8)\r\n\r\n with open('%s-images.idx3-ubyte' % kind, 'rb') as imgpath:\r\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\r\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\r\n\r\n return images, labels", "def load_mnist(path='mnist/mnist.npz'):\n\n with np.load(path) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n x_train = x_train.astype(np.float32) / 255.\n y_train = y_train.astype(np.int32)\n x_test = x_test.astype(np.float32) / 255.\n y_test = y_test.astype(np.int32)\n \n return (x_train, y_train), (x_test, y_test)", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels", "def load_mnist(path, kind='train'):\n\tlabels_path = os.path.join(path,'%s-labels.idx1-ubyte'%kind)\n\timages_path = os.path.join(path,'%s-images.idx3-ubyte'%kind)\n\t\n\twith open(labels_path, 'rb') as lbpath:\n\t\tmagic, n = struct.unpack('>II', lbpath.read(8))\n\t\tlabels = np.fromfile(lbpath, dtype=np.uint8)\n\t\t\n\twith open(images_path, 'rb') as imgpath:\n\t\tmagic, num, row, cols = struct.unpack('>IIII', imgpath.read(16))\n\t\timages = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\n\t\n\treturn images, labels", "def load_mnist(path, kind='train'):\n '''ref: http://yann.lecun.com/exdb/mnist/ '''\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n\n # check the offical doc to know how to extract the content\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000801(2049) magic number (MSB first)\n 0004 32 bit integer 60000 number of items\n 0008 unsigned byte ?? label\n 0009 unsigned byte ?? label\n ........\n xxxx unsigned byte ?? label\n The labels values are 0 to 9.\n '''\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000803(2051) magic number\n 0004 32 bit integer 60000 number of images\n 0008 32 bit integer 28 number of rows\n 0012 32 bit integer 28 number of columns\n 0016 unsigned byte ?? pixel\n 0017 unsigned byte ?? pixel\n ........\n xxxx unsigned byte ?? pixel\n Pixels are organized row-wise. Pixel values are 0 to 255. 
0 means background (white), 255 means foreground (black).\n '''\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(path, kind='train'):\n\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte' % kind)\n \n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n \n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n \n return images, labels", "def load_mnist(path, kind = 'train'):\n label_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)\n\n\n with open(label_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))\n\n labels = np.fromfile(lbpath, dtype= np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\n\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels),784)\n\n\n return images, labels", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'{}-labels-idx1-ubyte'.format(kind))\n images_path = os.path.join(path,'{}-images-idx3-ubyte'.format(kind))\n with open(labels_path, 'rb') as lbpath:\n magic, n = 
struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8).reshape(n)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape((num,1,rows,cols))\n print(kind)\n print(\"label num:\",n)\n print(\"image num:\",num)\n print(\"image rows:\",rows)\n print(\"image cols:\",cols)\n images = images/255\n return images, labels", "def read_mnist_images(filename, dtype=None):\n with gzip.open(filename, 'rb') as f:\n magic, number, rows, cols = struct.unpack('>iiii', f.read(16))\n if magic != MNIST_IMAGE_MAGIC:\n raise ValueError(\"Wrong magic number reading MNIST image file\")\n array = numpy.frombuffer(f.read(), dtype='uint8')\n array = array.reshape((number, 1, rows, cols))\n if dtype:\n dtype = numpy.dtype(dtype)\n\n if dtype.kind == 'b':\n # If the user wants Booleans, threshold at half the range.\n array = array >= 128\n elif dtype.kind == 'f':\n # Otherwise, just convert.\n array = array.astype(dtype)\n array /= 255.\n else:\n raise ValueError(\"Unknown dtype to convert MNIST to\")\n return array", "def extract_images(filename):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, filename))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data", "def _extract_images(self, filename):\n log.info('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = self._read32(bytestream)\n rows = self._read32(bytestream)\n cols = self._read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def load_data():\n prefix = 'mnist_data/'\n train_data = np.load(prefix + 'mnist_train_images.npy')\n train_labels = np.load(prefix + 'mnist_train_labels.npy')\n val_data = np.load(prefix + 'mnist_validation_images.npy')\n val_labels = np.load(prefix + 'mnist_validation_labels.npy')\n test_data = np.load(prefix + 'mnist_test_images.npy')\n test_labels = np.load(prefix + 'mnist_test_labels.npy')\n assert train_data.shape == (55000, 784) and train_labels.shape == (55000, 10)\n assert val_data.shape == (5000, 784) and val_labels.shape == (5000, 10)\n assert test_data.shape == (10000, 784) and test_labels.shape == (10000, 10)\n return train_data, train_labels, val_data, val_labels, test_data, test_labels", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = 
_read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)[0]\n rows = _read32(bytestream)[0]\n cols = _read32(bytestream)[0]\n #print('check', magic, num_images, rows, cols, rows * cols * num_images)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def load_data(m=5000, n=100, path='D:/file/vscode/py/data/mnist.npz'):\r\n f = np.load(path)\r\n x_train, y_train = f['x_train'], f['y_train']\r\n\r\n x_test, y_test = f['x_test'], f['y_test']\r\n\r\n f.close()\r\n return (x_train, y_train), (x_test, y_test)", "def extract_images(f):\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def load_fashion_mnist(path, kind='train'):\n labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)\n return images, labels", "def load_fmnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def read_mnist_labels(filename):\n with gzip.open(filename, 'rb') as f:\n magic, _ = struct.unpack('>ii', f.read(8))\n if magic != MNIST_LABEL_MAGIC:\n raise ValueError(\"Wrong magic number reading MNIST label file\")\n array = numpy.frombuffer(f.read(), dtype='uint8')\n array = array.reshape(array.size, 1)\n return array", "def load_images(filename='training_images'): \n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read() # hope ya get it all\n\n # grab the first four numbers ...\n # fmt='>i' means big-endian int32\n magic, n_images, n_rows, n_cols = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(4))\n\n # i am a god-fearing man\n assert magic[0] == 2051, \"bad magic number, what do?\"\n\n\n # so i think you can use the standard libary's \"array\" for this, just\n # because binary data of any sort is kinda dodgy, but this grabs 'the rest'\n # format='B' means unsigned 
char === 'uint8', and apparently endianness doesn't matter\n image_stream = array.array('B', b[16:])\n\n # so each 28*28 byte portion of image_stream is a flattened image. these two\n # numpy.reshape calls get it into the desired shape for A. maybe could\n # combine it into one call, idk. anyway, each flattened image appears as a\n # row, and there is a row for each image.\n image_first = numpy.reshape(image_stream, (n_images[0], n_rows[0], n_cols[0]))\n images = image_first.reshape(n_images[0], n_rows[0]*n_cols[0])\n\n # convert to float in [0,1]\n images = images.astype('f') / 255\n\n return images", "def MNIST_data():\n\n # Pobieramy macierze numpy z cyframi\n # images[i,j,k] <=> piksel (j,k) z i-tego obrazka w zbiorze danych\n images, labels = get_MNIST_dataset(range(10), \"training\") #pierwszy argument to\n\n # a) Ilosc przykladow i rozmiary danych\n print \"Raw training data dimensions \", images.shape\n print \"Labels dimensions \",labels.shape\n\n # b) Ile jest cyfr 2?\n print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n # c) Jaki jest sredni obrazek 2 ? (Usrednienie wszystkich macierzy ktore sa 2)\n\n #1. Pobierzmy wszystkie dwojki, fajny sposob indeksowania\n print labels == 2\n only_2 = images[labels == 2, :, :]\n print \"Checking number of 2s \", only_2.shape\n\n #2. TODO: Usrednienie (matrix.mean moze byc przydatne)\n\n #3. TODO: narysowanie usrednionej cyfry (zobacz pl.imshow)\n\n # d) Ostatnie - przetworzmy ostatnia cyfre do 1 wymiarowego wektora\n vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n print \"Vectorized last digit \", vectorized", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(filename,lx):\n print('Extracting', filename,'aaaaaa')\n \n data=numpy.loadtxt(filename,dtype='int64')\n dim=data.shape[0]\n data=data.reshape(dim, lx, lx, 1) \n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n data = data.reshape(data.shape[0],\n data.shape[1] * data.shape[2])\n # Convert from [0, 255] -> [0.0, 1.0].\n data = data.astype(numpy.float64)\n # images = numpy.multiply(images, 1.0 / 255.0) # commented since it is ising variables\n data = numpy.multiply(data, 1.0 ) # multiply by one, instead\n print(data.shape)\n return data", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + 
image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(f):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data", "def as_mnist(filename, imwidth):\n\n images = []\n labels = []\n \n if filename.find(\"devel\") != -1:\n print(\"we're working with the development set: \" + filename)\n\n for cls, data in enumerate(load(filename)):\n for example in data:\n labels.append(cls)\n image = numpy.zeros(shape=(imwidth, imwidth), dtype='uint8')\n for (x, y) in example:\n x_ = int(round(imwidth * x))\n y_ = int(round(1-(imwidth * y)))\n image[y_, x_] = 255\n images.append(image.flatten())\n\n return numpy.vstack(images).T.copy(), numpy.array(labels)", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def load_train_small():\n with open('mnist_train_small.npz', 'rb') as f:\n train_set_small = np.load(f)\n train_inputs_small = train_set_small['train_inputs_small']\n train_targets_small = train_set_small['train_targets_small']\n return train_inputs_small, train_targets_small", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def import_mnist():\n\turl_mnist = \"http://deeplearning.net/data/mnist/mnist.pkl.gz\"\n\tfile_name = \"mnist.pkl.gz\"\n\twork_directory = \"mnist\"\n\tfile_path = maybe_download(url=url_mnist, file_name=file_name, work_directory=work_directory)\n\n\timport pickle\n\twith gzip.open(file_path,'rb') as ff :\n\t\tu = pickle._Unpickler( ff )\n\t\tu.encoding = 'latin1'\n\t\ttrain, val, test = u.load()\n\t\ttrainX = np.array(train[0])\n\t\ttrainY = np.reshape(train[1], [50000, 1])\n\t\tvalX = np.array(val[0])\n\t\tvalY = 
np.reshape(val[1], [10000, 1])\n\t\ttestX = np.array(test[0])\n\t\ttestY = np.reshape(test[1], [10000, 1])\n\t\ttrainX = np.concatenate((trainX, valX), axis = 0)\n\t\ttrainY = np.concatenate((trainY, valY), axis = 0)\n\treturn trainX, trainY, testX, testY", "def load_mnist_dataset(shape=(-1,784)):\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n data = data.reshape(shape)\n # data = data.reshape(-1, 1, 28, 28) # for lasagne\n # data = data.reshape(-1, 28, 28, 1) # for tensorflow\n # data = data.reshape(-1, 784) # for tensorflow\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n ## you may want to change the path\n data_dir = '' #os.getcwd() + '/lasagne_tutorial/'\n # print('data_dir > %s' % data_dir)\n\n X_train = load_mnist_images(data_dir+'train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels(data_dir+'train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images(data_dir+'t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels(data_dir+'t10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n X_train, X_val = X_train[:-10000], X_train[-10000:]\n y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n ## you may want to plot one example\n # print('X_train[0][0] >', X_train[0][0].shape, type(X_train[0][0])) # for lasagne\n # print('X_train[0] >', X_train[0].shape, type(X_train[0])) # for tensorflow\n # # exit()\n # # [[..],[..]] (28, 28) numpy.ndarray\n # # plt.imshow 只支持 (28, 28)格式,不支持 (1, 28, 28),所以用 [0][0]\n # fig = plt.figure()\n # #plotwindow = fig.add_subplot(111)\n # # plt.imshow(X_train[0][0], cmap='gray') # for lasagne (-1, 1, 28, 28)\n # plt.imshow(X_train[0].reshape(28,28), cmap='gray') # for tensorflow (-1, 28, 28, 1)\n # plt.title('A training image')\n # plt.show()\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them again.)\n return X_train, y_train, X_val, y_val, X_test, y_test", "def readmnist(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 
'train-labels.idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')\n # else:\n # raise ValueError, \"dataset must be 'testing' or 'training'\"\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in xrange(len(lbl)):\n yield get_img(i)", "def load_from_array():\n\n x = np.load(settings.data(\"x.npy\")).reshape(-1, 1, 224, 224)\n y = np.load(settings.data(\"y.npy\"))\n\n return x, y", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def load_data(path='mnist.npz'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(\n path,\n origin=origin_folder + 'mnist.npz',\n file_hash=\n '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')\n print('############################################' + path) \n with np.load(path, allow_pickle=True) as f: # pylint: disable=unexpected-keyword-arg\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\n return (x_train, y_train), (x_test, y_test)", "def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n return imgs_array", "def _load_mnist(path, dataset=\"training\", digits=None, asbytes=False,\n selection=None, return_labels=True, return_indices=False):\n\n # The files are assumed to have these names and should be found in 'path'\n files = {\n 'training': ('train-images-idx3-ubyte', 'train-labels-idx1-ubyte'),\n 'testing': ('t10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte'),\n }\n\n try:\n images_fname = os.path.join(path, files[dataset][0])\n labels_fname = os.path.join(path, files[dataset][1])\n except KeyError:\n raise ValueError(\"Data set must be 'testing' or 'training'\")\n\n # We can skip the labels file only if digits aren't specified and labels\n # aren't asked for\n if return_labels or digits is not None:\n flbl = open(labels_fname, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n labels_raw = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(images_fname, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n images_raw = pyarray(\"B\", fimg.read())\n fimg.close()\n\n if digits:\n indices = [k for k in range(size) if labels_raw[k] in digits]\n else:\n indices = range(size)\n\n if selection:\n indices = indices[selection]\n\n images = np.zeros((len(indices), rows, cols), dtype=np.uint8)\n\n if return_labels:\n labels = 
np.zeros((len(indices)), dtype=np.int8)\n for i in range(len(indices)):\n images[i] = np.array(images_raw[indices[i] * rows * cols:(indices[i] + 1) * rows * cols]).reshape((rows, cols))\n if return_labels:\n labels[i] = labels_raw[indices[i]]\n\n if not asbytes:\n images = images.astype(float)/255.0\n\n ret = (images,)\n if return_labels:\n ret += (labels,)\n if return_indices:\n ret += (indices,)\n\n if len(ret) == 1:\n return ret[0] # Don't return a tuple of one\n\n return ret", "def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)", "def mnist(path=None):\r\n url = 'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = _images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def load_data(filename):\n emnist = loadmat(filename)\n\n # Load training images and labels\n train_images_unshuffled = emnist['train_images']\n train_labels_unshuffled = emnist['train_labels']\n\n # Combine labels and training data\n combined_training = np.hstack((train_images_unshuffled, train_labels_unshuffled))\n\n # Shuffle data\n np.random.shuffle(combined_training)\n\n # Seperate into data and labels\n # Split into training and validation sets\n train_images = combined_training[:20800,:-1] / 255 # Normalize data, values are now between 0 and 1\n train_labels 
= combined_training[:20800,-1][...,None] # Turns back into column vector\n validation_images = combined_training[20800:,:-1] / 255 # Normalize data, values are now between 0 and 1\n validation_labels = combined_training[20800:,-1][...,None] # Turns back into column vector\n\n # Load training images and labels\n test_images = emnist['test_images'] / 255 # Normalize data, values are now between 0 and 1\n test_labels = emnist['test_labels']\n\n return train_images, train_labels, test_images, test_labels, validation_images, validation_labels", "def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def readMNISTData():\n mnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True) \n return mnist", "def load_images(file):\n\timage_list = [] # List for storing all the images\n\ttargets = []\n\t\n\tfor filename in glob.glob(file + '/*.png'):\n\t\t# ==================\n\t\t# Reading the image\n\t\t# ==================\n\t\timage = scipy.misc.imread(filename).astype(np.float32)\n\t\t\n\t\t# ================================\n\t\t# Converting the image to a vector\n\t\t# ================================\n\t\timage = image.flatten() # (784, )\n\t\t\n\t\t# ==============================\n\t\t# Normalizing the image to numpy\n\t\t# ==============================\n\t\timage = image / 255.0\n\t\timage = image - 0.5\n\t\timage = image * 2.0\n\t\t\n\t\t# ===============================\n\t\t# Appending the image to the list\n\t\t# ===============================\n\t\timage_list.append(image)\n\t\t\n\t\t_, value = filename.split('\\\\')\n\t\t# print(value[0])\n\t\ttargets.append(int(value[0]))\n\t\n\timage_list = np.array(image_list)\n\ttargets = np.array(targets)\n\t\n\t# ================================================\n\t# \t\t\tShuffling the data\n\t# ================================================\n\timage_list, targets = shuffle(image_list, targets)\n\t\n\ttrain_images, test_images, train_targets, test_targets = split(image_list, targets)\n\treturn train_images, test_images, train_targets, test_targets", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def read_picture_data(filename):\n file_name = os.path.join('.', 'datas', filename)\n\n try:\n with open(file_name, 'rb') as file:\n read_data = file.read()\n except FileNotFoundError:\n 
print(f'Oups, the file {filename} was not found')\n\n try:\n if filename == 'train-images.idx3-ubyte':\n number_of_pics = 60000\n else:\n number_of_pics = 10000\n except LookupError:\n print(f'Oups, the file {filename} was not named as a MNist file')\n\n picture_data = np.zeros((number_of_pics, 28 * 28)) # 28*28 = 784\n\n s = 0\n for n in range(16, number_of_pics * 784, 784): # 16 header bytes being dumped\n for t, byte in enumerate(read_data[n: n + 784]):\n picture_data[s, t] = byte\n s += 1\n\n print(f'\\nPicture data read from {filename}\\n')\n\n return picture_data", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def read_gz(images,labels):\n\t# Open the images with gzip in read binary mode\n\t# images = gzip.open('../MNIST-data/train-images-idx3-ubyte.gz', 'rb')\n\t# labels = gzip.open('../MNIST-data/train-labels-idx1-ubyte.gz', 'rb')\n\n\t# Read the binary data\n\n\t# We have to get big endian unsigned int. So we need '>I'\n\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]#28\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]#28\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0] #60000\n\t# print(number_of_images);\n\n\tif number_of_images != N:\n\t raise Exception('number of labels did not match the number of images')\n\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array #60000X28X28\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t if i % 1000 == 0:\n\t print(\"i: %i\" % i)\n\t for row in range(rows):\n\t for col in range(cols):\n\t tmp_pixel = images.read(1) # Just a single byte\n\t tmp_pixel = unpack('>B', tmp_pixel)[0]\n\t x[i][row][col] = tmp_pixel\n\t tmp_label = labels.read(1)\n\t y[i] = unpack('>B', tmp_label)[0]\n\t # print(y.shape)#60000X1\n\treturn (x, y)", "def load(filename):\n img = image.load_img(filename, target_size=(299, 299))\n np_image = image.img_to_array(img)\n np_image = np.array(np_image).astype('float32')/255\n\n # Make to a rank 4 tensor (1, 299, 299, 3) -> 1 is for the batch size\n np_image = np.expand_dims(np_image, axis=0)\n\n return np_image", "def load_fashion_mnist():\n # List of image file names\n dataset_directory = os.path.join(root_directory,'Fashion_MNIST')\n filenames = os.listdir(dataset_directory)\n filenames.sort()\n\n # List of numpy array; each row is a Image of the dataset\n data = []\n\n # Numpy array of labels associated to each class of image\n target = np.empty([len(filenames), ])\n\n previous_label = ''\n class_num = -1\n index = 0\n\n for index, filename in enumerate(filenames):\n data.append(Bitmap(io.imread(os.path.join(dataset_directory, filename))))\n file_label = filename.split('-')[0]\n\n if(previous_label != file_label):\n previous_label = file_label\n class_num += 1\n target[index] = class_num\n else:\n target[index] = class_num\n\n 
return {'bitmaps': data, 'targets': target}", "def load_EMNIST_data(file, verbose = False, standarized = False): \n mat = sio.loadmat(file)\n data = mat[\"dataset\"]\n \n X_train = data['train'][0,0]['images'][0,0]\n X_train = X_train.reshape((X_train.shape[0], 28, 28), order = \"F\")\n y_train = data['train'][0,0]['labels'][0,0]\n y_train = np.squeeze(y_train)\n y_train -= 1 #y_train is zero-based\n \n X_test = data['test'][0,0]['images'][0,0]\n X_test= X_test.reshape((X_test.shape[0], 28, 28), order = \"F\")\n y_test = data['test'][0,0]['labels'][0,0]\n y_test = np.squeeze(y_test)\n y_test -= 1 #y_test is zero-based\n \n if standarized: \n X_train = X_train/255\n X_test = X_test/255\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_test -= mean_image\n \n\n if verbose == True: \n print(\"EMNIST-letter dataset ... \")\n print(\"X_train shape :\", X_train.shape)\n print(\"X_test shape :\", X_test.shape)\n print(\"y_train shape :\", y_train.shape)\n print(\"y_test shape :\", y_test.shape)\n \n return X_train, y_train, X_test, y_test", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def load_image_as_np(filename):\n try:\n img = image.load_img(filename, color_mode='grayscale') # for newer versions, use \"color_mode='grayscale'\"; For older versions, use \"grayscale=True\"\n return np.atleast_3d(img)\n except Exception as error:\n logging.error(traceback.format_exc())", "def bin2matrix(filename):\n filename = realpath(filename)\n Xy = np.load(filename)\n if len(Xy.shape) == 3:\n X = Xy[:,:,:-1]\n y = Xy[:,:,-1]\n else:\n X = Xy[:,:-1]\n y = Xy[:,-1]\n return X, y", "def load_data(path='alex_mnist_data.npz'):\n with np.load(path, allow_pickle=True) as f:\n x_train, y_train = f['alex_train_data'], f['alex_train_label']\n x_test, y_test = f['alex_test_data'], f['alex_test_label']\n return (x_train, y_train),(x_test, y_test)", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = 
data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def load_dataset(ipc = 20000):\n files = os.listdir(\"..\\\\data\")\n ind = 0\n xs = []\n ys = []\n classNames = []\n for file in files:\n fileSplit = file.split('.')\n print('--Loading ' + fileSplit[0][18:] + ' data.')\n classNames.append(fileSplit[0][18:])\n x = np.load(\"..\\\\data\\\\\" + file)\n x = x.astype('float32')/255\n xs.append(x[0:ipc, :])\n y = np.array([float(ind) for i in range(ipc)])\n ys.append(y.reshape(ipc, 1))\n ind += 1\n\n xs = np.array(xs)\n ys = np.array(ys)\n xs = xs.reshape(xs.shape[0]*xs.shape[1], xs.shape[2])\n ys = ys.reshape(ys.shape[0]*ys.shape[1], ys.shape[2])\n return xs, ys, classNames", "def load(filename):\n lines = [l.strip('\\r\\n ') for l in open(filename, 'r').readlines()]\n lines = [l for l in lines if l != '']\n dims = [re.split(r'\\s+', l) for l in lines]\n f = np.array([[float(f) for f in d] for d in dims])\n return f", "def load_data(filename):\n assert os.path.exists(filename)==True\n dat = scipy.io.loadmat(filename)\n inputs = dat['inputs']\n #print len(inputs)\n targets = dat['targets']\n #print len(targets)\n assert len(inputs)==len(targets)\n\n global alldata\n global indim \n global outdim\n\n indim = len(inputs[0])\n outdim = 1\n #print indim\n alldata = ClassificationDataSet(indim, outdim, nb_classes = 8)\n alldata.setField('input',inputs)\n alldata.setField('target',targets)\n\n assert len(alldata['input'])==len(alldata['target'])\n print type(alldata)", "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\n f.close()\n \n X_train = [np.reshape(x, (784, 1)) for x in training_data[0]]\n Y_train = [vectorized_result(y) for y in training_data[1]]\n \n X_validation = [np.reshape(x, (784, 1)) for x in validation_data[0]]\n Y_validation = validation_data[1]\n \n X_test = [np.reshape(x, (784, 1)) for x in test_data[0]]\n Y_test = test_data[1]\n \n return (X_train, Y_train, X_validation, Y_validation, X_test, Y_test)", "def load_X():\n x1 = np.asarray([-1, -1, 1, -1, 1, -1, -1, 1]).reshape((1, 8))\n x2 = np.asarray([-1, -1, -1, -1, -1, 1, -1, -1]).reshape((1, 8))\n x3 = np.asarray([-1, 1, 1, -1, -1, 1, -1, 1]).reshape((1, 8))\n X = np.vstack([x1, x2, x3])\n\n return X", "def load_mnist (images_fn_gz, labels_fn_gz, digits=None, path=None, asbytes=False, selection=None, return_labels=True, return_indices=False):\n\n # We can skip the labels file only if digits aren't specified and labels aren't asked for\n if return_labels or digits is not None:\n flbl = gzip.open (labels_fn_gz, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n labels_raw = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = gzip.open(images_fn_gz, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n images_raw = pyarray(\"B\", fimg.read())\n fimg.close()\n\n if digits:\n indices = [k for k in range(size) if labels_raw[k] in digits]\n else:\n indices = range(size)\n\n if selection:\n indices = indices[selection] \n N = len(indices)\n\n images = zeros((N, rows, cols), dtype=uint8)\n\n if return_labels:\n labels 
= zeros((N), dtype=int8)\n for i, index in enumerate(indices):\n images[i] = array(images_raw[ indices[i]*rows*cols : (indices[i]+1)*rows*cols ]).reshape((rows, cols))\n if return_labels:\n labels[i] = labels_raw[indices[i]]\n\n if not asbytes:\n images = images.astype(float)/255.0\n\n ret = (images,)\n if return_labels:\n ret += (labels,)\n if return_indices:\n ret += (indices,)\n if len(ret) == 1:\n return ret[0] # Don't return a tuple of one\n else:\n return ret", "def load_data():\n\n \"\"\"The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)", "def imagesMatrix(path,imageSize = 10304,byteorder = '>'):\n listing = os.listdir(path)\n listing.sort()\n count = 0\n docFiles = []\n for infile in listing:\n count = count + 1\n docFiles.append(infile)\n matrix = np.zeros((imageSize,count))\n for i in range(len(listing)):\n matrix[:,i]=np.asarray(read_pgm(join(path,listing[i]),byteorder)).reshape(-1)\n return matrix,listing", "def load_rgb_image_as_3d_array(filename):\n image = np.asarray(Image.open(filename))\n image_shape = image.shape\n image_dim = len(image_shape)\n assert(image_dim == 2 or image_dim == 3)\n if(image_dim == 2):\n image = np.expand_dims(image, axis = 0)\n else:\n # transpose rgb image from [H, W, C] to [C, H, W]\n assert(image_shape[2] == 3 or image_shape[2] == 4)\n if(image_shape[2] == 4):\n image = image[:, :, range(3)]\n image = np.transpose(image, axes = [2, 0, 1])\n output = {}\n output['data_array'] = image\n output['origin'] = (0, 0)\n output['spacing'] = (1.0, 1.0)\n output['direction'] = 0\n return output", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n datadict = p.load(f, encoding='iso-8859-1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def _images(path):\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255", "def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def tns3d_batch(self, tensorlist, output_resolution):\n dimensions = 3\n formats = 5\n filenames = []\n\n ## record the file dir\n filelist = open(tensorlist)\n for line in filelist:\n 
list_seg = line.split()\n filenames.append(list_seg[-1])\n filelist.close()\n\n ## get the batch data of 3d tensors and their basic info\n batch_size = len(filenames)\n tensor_batch = np.zeros((batch_size, dimensions, output_resolution, output_resolution), dtype='int32')\n for findex in range(0, batch_size):\n img = self.tns3d_Sample(filenames[findex], RES)\n tensor_batch[findex, :, :, :] = img\n print('**** the {}th tns flattening sampled finished ({} in total) ****'.format(findex, batch_size))\n return tensor_batch", "def mnist(path):\n with open(path, 'r') as f:\n for line in f:\n data = line.strip().split(',')\n\n # Label is a vector with one element per class\n label = [0.0] * 10\n label[int(data[0])] = 1.0 \n\n # The data are images of 28x28 pixels\n image_array = np.asfarray(data[1:]).reshape((28, 28))\n # Normalize the pictures \n image_array = image_array / 255.0\n\n #plt.imshow(image_array, cmap='Greys', interpolation='None')\n yield (image_array, label)", "def load_data(path,size, scale = True):\n images = os.listdir(path)\n images.sort()\n\n X = []\n for i, img in enumerate(images):\n photo = plt.imread(os.path.join(path,img))\n if size:\n photo = tf.image.resize(photo, (size, size))\n X.append(photo)\n \n X = np.array(X)\n if scale:\n X = X/X.max() \n return X", "def load_MNIST_data():\n mnist = input_data.read_data_sets('data', one_hot=True)\n return {'train': mnist.train.images,\n 'validation': mnist.validation.images,\n 'test': mnist.test.images}", "def load_data(flatten=True):\n if flatten:\n reshape = _flatten\n else:\n reshape = _square\n\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = reshape(x_train)\n x_test = reshape(x_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n y_train = to_categorical(y_train, 10)\n y_test = to_categorical(y_test, 10)\n return x_train, y_train, x_test, y_test", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_test = np.load(\"data/X_test.npy\")\n\t\t\t\tY_test = np.load(\"data/Y_test.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tX_test = np.zeros((10000,64,64,3))\n\t\t\t\tY_test = []\n\n\t\t\t\t\n\t\t\t\twith open(path, 'rb') as fo:\n\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\tY_test.extend(temp_element[b'labels'])\n\n\t\t\t\tfor j in range(10000):\n\t\t\t\t\tX_test[j] = self._reshape(temp_data[j])\n\n\t\t\t\tY_test = np.eye(10)[np.array(Y_test)]\n\t\t\t\t\n\t\t\t\tnp.save(\"./data/X_test\", X_test)\n\t\t\t\tnp.save(\"./data/Y_test\", Y_test)\n\n\t\t\t\tbreak\n\n\n\t\treturn X_test, Y_test", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def loadtrainData():\n train_x = []\n train_y = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train_x.append([float(lineArr[i]) for i in range(len(lineArr) - 1)])\n train_y.append(int(lineArr[-1]))\n return np.mat(train_x), np.mat(train_y).transpose()", "def mnist(path):\n with open(path, 'r') as f:\n for line in f:\n data = line.strip().split(',')\n\n # Label is a vector 
with one element per class\n label = [0.01] * 10\n label[int(data[0])] = 0.99\n\n # The data are images of 28x28 pixels\n #image_array = np.asfarray(data[1:]).reshape((28, 28))\n image_array = np.asfarray(data[1:])\n # Normalize all values between [0.01, 1.0]\n image_array = ((image_array) / 255.0 * 0.99) + 0.01\n\n #plt.imshow(image_array, cmap='Greys', interpolation='None')\n yield (image_array, label)", "def load_tmp_atlas(filename):\n fbase, ext = osp.splitext(filename)\n fimg = None\n if osp.isfile(fbase+\".nii\"): fimg = fbase+\".nii\"\n if osp.isfile(fbase+\".nii.gz\"): fimg = fbase+\".nii.gz\" \n\n try:\n img = nib.load(fimg)\n except ValueError as e:\n print(\"error {0}, cannot find file {1} .nii or .nii.gz \".format(fbase, e.errno))\n\n fjson = None\n if osp.isfile(fbase+\".txt\"): fjson= fbase+\".txt\"\n if osp.isfile(fbase+\".json\"): fjson= fbase+\".json\"\n\n if fjson == None:\n warn(\"cannot find file %s .txt or .json\" % fbase)\n return None\n\n with open(fjson) as f:\n j_labels = json.load(f)\n\n a_labels = [label[1] for label in j_labels]\n \n return (img.get_data(), img.get_affine(), a_labels)", "def load_mnist(data_path=None, data_home=None, subsets=None):\n if data_path is None:\n data_path = _utils.validate_data_home(data_home)\n data_path /= 'mnist.npz'\n url = 'https://s3.amazonaws.com/img-datasets/mnist.npz'\n _ds_utils.get_file(data_path, url)\n \n if subsets is None:\n subsets = ['training', 'test']\n subsets = _ds_utils.validate_tvt(subsets, return_list=True)\n X, Y = [], []\n with np.load(data_path) as f:\n for subset in subsets:\n if subset == 'training':\n X.append(f['x_train'])\n Y.append(f['y_train'])\n elif subset == 'test':\n X.append(f['x_test'])\n Y.append(f['y_test'])\n else:\n raise ValueError('Subset:', subset, ' not supported.')\n return np.concatenate(X), np.concatenate(Y)", "def read_image(images_root):\n im_array = np.load(images_root)\n return im_array", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def load_image(file_name):\n if not osp.exists(file_name):\n print('{} not exist'.format(file_name))\n return\n image = np.asarray(io.imread(file_name))\n if len(image.shape)==3 and image.shape[2]>3:\n image = image[:, :, :3]\n # print(image.shape) #should be (x, x, 3)\n return image", "def tns3d_batch(self, tensorlist, output_resolution):\n dimensions = 3\n formats = 5\n filenames = []\n\n ## record the file dir and corresponding labels\n filelist = open(tensorlist)\n for line in filelist:\n list_seg = line.split()\n filenames.append(list_seg[-1])\n filelist.close()\n\n ## get the batch data of 3d tensors and their basic info\n batch_size = len(filenames)\n tensor_batch = np.zeros((batch_size, dimensions, output_resolution, output_resolution), dtype='int32')\n for findex in range(0, batch_size):\n mapImgs = self.tns3d_Sample(filenames[findex], RES)\n for i in range(0, 3):\n tensor_batch[findex, i, :, :] = mapImgs[i]\n print('**** the {}th tns mapping sampled finished ({} in total) ****'.format(findex, batch_size))\n return tensor_batch", "def read_and_normalize(im_path):\n im3d = io.imread(im_path, plugin=\"tifffile\").astype(np.uint8)\n sh = np.shape(im3d)\n if len(sh) > 3:\n im3d = im3d.reshape(sh[1], sh[2], sh[3])\n return im3d / 255", "def image_load(path) -> numpy.ndarray:\n # file\n na = numpy.array(Image.open(path))\n # 
fix shape\n na = numpy.moveaxis(na, [2,0,1], [0,1,2])\n # shape is now (3,h,w), add 1\n na = na.reshape(1,3,na.shape[1],na.shape[2])\n # change type\n na = na.astype(\"float32\") / 255.0\n return na", "def create_MNIST_arrays(\n alb_transforms=None, aug_number=1, target_dir=\".\", batch_size=256, num_workers=1\n):\n\n dataset_alb = AlbMNIST(os.path.join(target_dir, \"MNIST\"), download=True)\n dataloader = DataLoader(\n dataset_alb, batch_size=len(dataset_alb), shuffle=False, num_workers=num_workers\n )\n\n print(\"Fetching original dataset...\", end=\" \")\n originals_array = next(iter(dataloader))[\"original\"].numpy()\n labels_array = next(iter(dataloader))[\"label\"].numpy()\n print(\"Done!\")\n\n dataset_alb.set_transofrms(alb_transforms)\n aug_arrays = []\n dataloader = DataLoader(\n dataset_alb, batch_size=batch_size, shuffle=False, num_workers=num_workers\n )\n\n for aug_idx in range(aug_number):\n print(\"Making aug #%i\" % aug_idx)\n aug_arrays.append(compose_array_from_dataloader(dataloader, key=\"augmented\"))\n\n return originals_array, labels_array, aug_arrays", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return X, Y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n # datadict = p.load(f)\n datadict = pickle.load(f, encoding = 'bytes')\n X = datadict[b'data']\n Y = datadict[b'labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def load_images(mraw, h, w, N, bit=16, roll_axis=True):\n\n if int(bit) == 16:\n images = np.memmap(mraw, dtype=np.uint16, mode='r', shape=(N, h, w))\n elif int(bit) == 8:\n images = np.memmap(mraw, dtype=np.uint8, mode='r', shape=(N, h, w))\n elif int(bit) == 12:\n warnings.warn(\"12bit images will be loaded into memory!\")\n #images = _read_uint12_video(mraw, (N, h, w))\n images = _read_uint12_video_prec(mraw, (N, h, w))\n else:\n raise Exception(f\"Unsupported bit depth: {bit}\")\n\n\n #images=np.fromfile(mraw, dtype=np.uint16, count=h * w * N).reshape(N, h, w) # about a 1/3 slower than memmap when loading to RAM. 
Also memmap doesn't need to read to RAM but can read from disc when needed.\n if roll_axis:\n return np.rollaxis(images, 0, 3)\n else:\n return images", "def load_images(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data", "def read_local(path):\n files = os.listdir(path)\n imgs = []\n for f in files:\n if f.endswith(\".tiff\") or f.endswith(\".tif\"):\n img = Image.open(os.path.join(path, f))\n imgs.append(np.array(img))\n return imgs", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='latin1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float64\")\n Y = np.array(Y)\n return X, Y" ]
[ "0.6996989", "0.69236135", "0.68585235", "0.6848802", "0.68018305", "0.6801796", "0.6785937", "0.67511636", "0.6737762", "0.6649627", "0.6620649", "0.6572148", "0.65664387", "0.6564884", "0.6544595", "0.65435225", "0.6539307", "0.65093267", "0.6489088", "0.6486471", "0.6466393", "0.6442627", "0.642544", "0.63891107", "0.63820785", "0.63783324", "0.63695645", "0.63624835", "0.6358522", "0.6350845", "0.6327597", "0.63234466", "0.63218576", "0.6306377", "0.6283855", "0.6274831", "0.6261332", "0.6259758", "0.6251833", "0.6244885", "0.62292695", "0.6207699", "0.6184362", "0.61696994", "0.61560726", "0.614638", "0.61440074", "0.6129719", "0.61212415", "0.61165553", "0.60984194", "0.6097963", "0.60717356", "0.6049447", "0.6036963", "0.6031613", "0.5996555", "0.59681153", "0.5965679", "0.59484005", "0.5943711", "0.59437066", "0.59207684", "0.59113955", "0.5900979", "0.58840895", "0.58830106", "0.58818", "0.5880251", "0.586815", "0.5853163", "0.5852128", "0.5840969", "0.5837958", "0.5836616", "0.5836456", "0.58296245", "0.5827238", "0.5820289", "0.5813015", "0.5807307", "0.5799579", "0.579813", "0.57899207", "0.57870394", "0.5784477", "0.5782243", "0.57811606", "0.57786447", "0.57761085", "0.57737607", "0.5767407", "0.57669175", "0.5762911", "0.5759293", "0.5757081", "0.57570505", "0.57541317", "0.5742615", "0.5730597" ]
0.64559424
21
Calculate the squared L2 norm of the pattern.
def l2_norm(pattern): return np.linalg.norm(pattern)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def L2norm(m):\n return np.sqrt(np.sum(m**2))", "def squared_norm(self) -> float:\n return self.__real**2 + self.__img[0]**2 + self.__img[1]**2 + self.__img[2]**2", "def norm_l2(v):\n return np.sqrt((v**2).sum())", "def norm_L2(u):\n return norm_l2(u)/sqrt(float(u.size))", "def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5", "def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flattened, flattened)", "def _l2_norm_squared(self, z, theta):\n norms = np.zeros(shape=(len(z), self.n_states))\n\n for j in range(self.n_states):\n diff = theta[:, j] - z # ndarray of shape (n_samples, n_states) with differences\n norms[:, j] = np.square(np.linalg.norm(diff, axis=1)) # squared state conditional l2 norms\n\n return norms # squared l2 norm.", "def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2", "def l2_norm(self, input):\n input_size = input.size()\n buffer = torch.pow(input, 2)\n normp = torch.sum(buffer, 1).add_(1e-10)\n norm = torch.sqrt(normp)\n _output = torch.div(input, norm.view(-1, 1).expand_as(input))\n output = _output.view(input_size)\n\n return output", "def norm(self):\n mag_squared = self._sum_of_squares()\n return sqrt(mag_squared)", "def l2(vec):\n return np.linalg.norm(vec)", "def L2norm(self, array):\n norm = torch.sqrt(torch.sum(array * array))\n return norm", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def norm(self):\n\t\treturn math.sqrt(self.norm2())", "def norm2d(self) -> float:\n\n return self.v2ddict.norm2d()", "def norm2(point):\n return np.sum(point**2, -1)", "def l2_norm(v):\n res = 0\n for e in v:\n res += e * e\n return math.sqrt(res)", "def l2norm_(X, Xstar):\n return cdist(X, Xstar)", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def norm_l2(u):\n return linalg.norm(u.ravel())", "def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)", "def norm(self):\n\t\treturn np.sqrt(self.normSq())", "def l2norm(array1,array2):\r\n tot = np.sum(np.abs(array1)**2)\r\n return np.sqrt(np.sum(np.abs(array1-array2)**2)/tot)", "def _l2s(self, params):\n return [np.linalg.norm(param) for param in params]", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def normsq(self):\n return sum(x**2 for x in self.data)", "def compute_L2_normalization(xx):\r\n\treturn np.sum(xx ** 2, axis=1)", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)", "def norm(self):\n return sqrt(self.dot(self))", "def normSq(self):\n\t\treturn self.x*self.x+self.y*self.y", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def norm(x):\n return np.sqrt(norm2(x))", "def _l2_normalize(x, axis=None, eps=1e-12):\n return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)", "def sigma_norm2( self):\n return self._sigma2", "def calculate_error_l2_norm(self, dY):\n solutions = []\n norm = 0.\n for mi in range(len(self._meshes)):\n for ei in range(len(self._meshes[mi].elements)):\n e = self._meshes[mi].elements[ei]\n # change this to gauss points:\n x_vals, w = p_roots(20)\n norm_e_squared = 0.\n for i, x in enumerate(x_vals):\n norm_e_squared += w[i] * \\\n 
self.get_sol_value(mi, ei, dY, x,\n count_lift=False)**2\n norm_e_squared *= e.jacobian\n norm += norm_e_squared\n return sqrt(norm)", "def norm(self) -> float:\n return self.squared_norm()**0.5", "def norm_with_l2(original_mat):\n normed_mat = np.zeros(original_mat.shape, dtype=np.float32)\n if len(original_mat.shape) == 2:\n for ind_r in range(original_mat.shape[0]):\n a = np.square(original_mat[ind_r]*1.0)\n b = np.sum(a)\n c = np.sqrt(b)\n normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / c\n # normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / np.sqrt(np.sum(np.square(original_mat[ind_r])*1.0))\n return normed_mat", "def norm2_r(self, a: np.ndarray) -> float:\n return a.T @ a", "def norm(self):\n return math.sqrt(self.dotProduct(self))", "def l2norm(X):\n norm = np.linalg.norm(X, axis=1, keepdims=True)\n return 1.0 * X / norm", "def normsq(self):\n return abs(sum(self._ar * self._ar))", "def norm2(self):\n return getattr(self, self.norm2_name)", "def norm(self):\n return np.sqrt(np.dot(self._data, self._data))", "def inner_product_to_normalized_L2_square(matrix):\n\n length = matrix.shape[0]\n norm = np.divide(1, np.sqrt(l2_square_from_inner_product(matrix)))\n\n normalized_inner_product = np.multiply(np.multiply(np.reshape(norm, [length, 1]), matrix),\n np.reshape(norm, [1, length]))\n return 2 - 2 * normalized_inner_product", "def test_scale_features_L2_norm(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.0304526, 0.409996], [-0.999536, 0.816936], [-0.000485946, 0.40561]])\n\n # perform L2 normalization and check answer\n cdata.scale_features('L2 norm')\n self.assertTrue(allclose(cdata.data, answer))", "def L2_normalize(xx):\r\n\tZx = compute_L2_normalization(xx)\r\n\treturn xx / np.sqrt(Zx[:, np.newaxis])", "def inner_product_to_L2_square(matrix):\n\n length = matrix.shape[0]\n squared_norm = np.reshape(np.diag(matrix), (length, 1))\n\n return squared_norm + np.transpose(squared_norm) - 2 * matrix", "def square_norm(x):\n return np.linalg.norm(x) ** 2", "def two_norm(v):\n return math.sqrt(dot_product(v, v))", "def norm(x):\n return inner_prod(x, x)[0].sqrt_()", "def norm(self):\n C = np.prod([F.T @ F for F in self.factors], axis=0)\n return np.sqrt(np.sum(C))", "def get_sqrt_2():\n return 1.41421356", "def norm(self):\n return math.sqrt(sum([x*x for x in self.mV]))", "def l2_norm(input_x, epsilon=1e-12):\n input_x_norm = input_x/(tf.reduce_sum(input_x**2)**0.5 + epsilon)\n return input_x_norm", "def norm(point):\n return np.sqrt(norm2(point))", "def fast_2_norm(A):\n v = np.random.rand(A.shape[1], 1)\n return la.norm(A.dot(v))", "def normalizeL2(f):\r\n \r\n f=np.array(f)\r\n fsum=np.sum(np.abs(f))\r\n if fsum==0:\r\n fnorm=f\r\n else:\r\n fnorm=f/np.sqrt(np.sum(np.abs(f)**2))\r\n \r\n return fnorm", "def Norm(self):\n \n return sqrt(sum([sum(abs(x)**2) for x in self.__ObjList]))", "def norm(x):\r\n return sqrt(np.numerical.sum(x**2))", "def norm(self):", "def normalize_l2(x):\n return x / (npla.norm(x))", "def norm(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n if self._dtype == complex:\n def __map(m):\n return m[2].real ** 2 + m[2].imag ** 2\n else:\n def __map(m):\n return m[2] ** 2\n\n n = self._data.map(\n __map\n ).reduce(\n lambda a, b: a + b\n )\n\n return math.sqrt(n)", 
"def normr(Mat):\n B = normalize(Mat, norm='l2', axis=1)\n return B", "def norm_sqr(x):\n return inner_prod(x, x)[0]", "def l2(v, axis=None):\n length = v.shape[0]\n return np.sqrt(np.sum(np.square(v), axis=axis) / length)", "def get_norm(self, l):\n return self._W.norm(l)", "def get_norm(self, l):\n return self._W.norm(l)", "def squared_norm(self, vector, base_point=None):\n sq_norm = self.embedding_metric.squared_norm(vector)\n return sq_norm", "def norm(x):\n return np.sqrt(np.sum(x ** 2))", "def squared_norm(self, vector, base_point=None):\n sq_norm = self.inner_product(vector, vector, base_point)\n return gs.real(sq_norm)", "def norm(self):\n return np.linalg.norm(self.ravel())", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def norm(self):\n norm = self.scalarProduct(self) ** 0.5\n return norm", "def squared_norm(self, vector, base_point=None):\n args = {\n \"vector\": vector,\n \"base_point\": base_point,\n }\n sq_norms = self._iterate_over_factors(\"squared_norm\", args)\n return sum(sq_norms)", "def l2_norm(vec_or_matrix):\n if len(vec_or_matrix.shape) == 1:\n # linear vector\n return vec_or_matrix / np.linalg.norm(vec_or_matrix)\n elif len(vec_or_matrix.shape) == 2:\n return vec_or_matrix / np.linalg.norm(vec_or_matrix, axis=1, ord=2)[:, np.newaxis]\n else:\n raise ValueError('Wrong number of dimensions, 1 or 2 is supported, not %i.' % len(vec_or_matrix.shape))", "def l2(x1, x2):\n return np.sqrt((x1 - x2)**2)", "def weight_l2_norm():\n cumulated_l2_norm = tf.constant(0., dtype=tf.float32)\n for trainable_variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n name = trainable_variable.name.split('/')[-1]\n if name.startswith('weights'):\n cumulated_l2_norm += tf.nn.l2_loss(trainable_variable)\n return cumulated_l2_norm", "def residualNorm2(self):\n r2 = (np.dot(self.x,np.dot(self.AtA,self.x)-2.0*self.Atb) + self.btb)*self.scale\n if self.regularizationLambda > 0:\n r2 -= self.regularizationLambda*np.dot(self.x,self.x)\n return r2", "def norm(self):\n raise NotImplementedError", "def getNorm(self, norm=lambda l: (sum(map(lambda x: x ** 2, l))) ** (1 / 2)):\n return norm(self.components)", "def l2norm(X): \n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n a = norm.expand_as(X) + 1e-8\n X = torch.div(X, a) \n return X", "def dist_squared(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n return (self - vec2) * (self - vec2)", "def norm2d(w_in):\n return nn.BatchNorm2d(num_features=w_in, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)", "def l2norm_1d(new, old):\n\tdiff = 0\n\tnx = len(new)\n\tfor i, n in enumerate(new):\n\t\tif n:\n\t\t\tdiff += ((n - old[i])/n)**2\n\tnorm = scipy.sqrt(diff/nx)\n\treturn norm", "def project_L2(w, l):\n return w * min(1, 1 / (l ** (1 / 2.0) * np.linalg.norm(w, 2)))", "def P2l_rec_norm(ells, cost):\n P22 = 3. * (1. - cost**2)\n P23 = 15. * cost * (1. - cost**2)\n P2l = np.zeros(len(ells))\n P2l[0] = 0.\n P2l[1] = 0.\n P2l[2] = P22\n P2l[3] = P23\n P2l_norm = np.copy(P2l)\n P2l_norm[2] *= P2l_norm_prefac(2)\n P2l_norm[3] *= P2l_norm_prefac(3)\n for ell in ells[4:]:\n # print ell, P2l[ell-1], P2l[ell-2]\n a = np.sqrt((4 * ell**2 - 1.) 
/ (ell**2 - 4))\n b = cost * P2l_norm[ell - 1]\n c = np.sqrt(((ell - 1.)**2 - 4) /\n (4 * (ell - 1.)**2 - 1)) * P2l_norm[ell - 2]\n # print a,b,c\n P2l_norm[ell] = a * (b - c)\n # print ell, P2l_norm[ell], P2l_norm_prefac(ell)\n P2l[ell] = P2l_norm[ell] / P2l_norm_prefac(ell)\n return P2l", "def func_val_l1_norm(w):\n\treturn np.linalg.norm(w,ord = 1)", "def norm(self) -> float:\n return numpy.linalg.norm(self.coeff)", "def squaredDistance(vec1, vec2):\n return (distance.euclidean(vec1, vec2))**2", "def norm(self):\n return np.linalg.norm(self.values)", "def l2_normalize(data, axis=-1, eps=1e-6):\n ret = data / (np.linalg.norm(data, axis=axis, keepdims=True) + eps)\n return ret", "def squared_distance_calculator(position1, position2):\r\n difference_vector = position2 - position1\r\n return np.dot(difference_vector, difference_vector)", "def vec_2norm (x):\n return math.sqrt (sum ([x_i**2 for x_i in x]))", "def norm(self):\n return numpy.linalg.norm(self.values)", "def normalize_l2norm(data,tol=0):\n data_sqrt=np.sqrt(np.square(data).sum(axis=1))\n data_sqrt.shape=(data_sqrt.shape[0],1)\n #tol=0#1e-8\n data=data/(data_sqrt+tol)\n return data", "def model_norm(self, order=2) -> float:\n # L-n norm of model where we treat the model as a flat other\n return math.pow(sum([\n torch.pow(layer, order).sum().item()\n for layer in self.parameters\n ]), 1.0 / order)", "def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)" ]
[ "0.7689054", "0.7469105", "0.7411847", "0.7393104", "0.73873013", "0.73019034", "0.7256206", "0.72420824", "0.71961755", "0.7140405", "0.71325195", "0.71182483", "0.71150076", "0.7069896", "0.7034822", "0.7023806", "0.7019462", "0.69990593", "0.69920164", "0.69680566", "0.6951413", "0.69096494", "0.6767838", "0.6760333", "0.673882", "0.67322636", "0.6731022", "0.6729424", "0.6721004", "0.6703303", "0.6668874", "0.66516876", "0.66384107", "0.65615237", "0.6560211", "0.65599376", "0.6558396", "0.65547085", "0.65506625", "0.65485436", "0.6518504", "0.65143293", "0.6474202", "0.646358", "0.64603555", "0.64586043", "0.6408987", "0.6408933", "0.6386254", "0.63810736", "0.63801146", "0.6365494", "0.63620234", "0.63610953", "0.6342854", "0.6337362", "0.6331644", "0.6296248", "0.62948173", "0.62751955", "0.62665683", "0.62654275", "0.6245794", "0.62226516", "0.62120557", "0.621056", "0.62048686", "0.62048686", "0.61481684", "0.6129907", "0.6125986", "0.61256516", "0.61035", "0.61035", "0.61035", "0.6101315", "0.60767424", "0.6068372", "0.60672754", "0.6066929", "0.6063615", "0.6049005", "0.60451674", "0.6041332", "0.6040449", "0.60394794", "0.6038446", "0.60287476", "0.6020228", "0.6018523", "0.601738", "0.6011987", "0.60094947", "0.6003854", "0.59972584", "0.59932375", "0.5973853", "0.5969759", "0.5957874", "0.59449786" ]
0.8329601
0
Calculate the l2 norm of a stack of patterns.
def l2_norm_batch(pattern_stack): return np.linalg.norm(pattern_stack, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l2_norm(pattern):\n return np.linalg.norm(pattern)", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flattened, flattened)", "def L2norm(m):\n return np.sqrt(np.sum(m**2))", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def _l2_norm_squared(self, z, theta):\n norms = np.zeros(shape=(len(z), self.n_states))\n\n for j in range(self.n_states):\n diff = theta[:, j] - z # ndarray of shape (n_samples, n_states) with differences\n norms[:, j] = np.square(np.linalg.norm(diff, axis=1)) # squared state conditional l2 norms\n\n return norms # squared l2 norm.", "def l2norm(array1,array2):\r\n tot = np.sum(np.abs(array1)**2)\r\n return np.sqrt(np.sum(np.abs(array1-array2)**2)/tot)", "def norm_L2(u):\n return norm_l2(u)/sqrt(float(u.size))", "def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5", "def _l2s(self, params):\n return [np.linalg.norm(param) for param in params]", "def l2(vec):\n return np.linalg.norm(vec)", "def norm_l2(u):\n return linalg.norm(u.ravel())", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def compute_L2_normalization(xx):\r\n\treturn np.sum(xx ** 2, axis=1)", "def norm_l2(v):\n return np.sqrt((v**2).sum())", "def L2norm(self, array):\n norm = torch.sqrt(torch.sum(array * array))\n return norm", "def norm_with_l2(original_mat):\n normed_mat = np.zeros(original_mat.shape, dtype=np.float32)\n if len(original_mat.shape) == 2:\n for ind_r in range(original_mat.shape[0]):\n a = np.square(original_mat[ind_r]*1.0)\n b = np.sum(a)\n c = np.sqrt(b)\n normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / c\n # normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / np.sqrt(np.sum(np.square(original_mat[ind_r])*1.0))\n return normed_mat", "def l2_norm(self, input):\n input_size = input.size()\n buffer = torch.pow(input, 2)\n normp = torch.sum(buffer, 1).add_(1e-10)\n norm = torch.sqrt(normp)\n _output = torch.div(input, norm.view(-1, 1).expand_as(input))\n output = _output.view(input_size)\n\n return output", "def l2_norm(v):\n res = 0\n for e in v:\n res += e * e\n return math.sqrt(res)", "def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def l2norm_(X, Xstar):\n return cdist(X, Xstar)", "def weight_l2_norm():\n cumulated_l2_norm = tf.constant(0., dtype=tf.float32)\n for trainable_variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n name = trainable_variable.name.split('/')[-1]\n if name.startswith('weights'):\n cumulated_l2_norm += tf.nn.l2_loss(trainable_variable)\n return cumulated_l2_norm", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def norm2(point):\n return np.sum(point**2, -1)", "def norm2d(self) -> float:\n\n return self.v2ddict.norm2d()", "def inner_product_to_normalized_L2_square(matrix):\n\n length = matrix.shape[0]\n norm = np.divide(1, np.sqrt(l2_square_from_inner_product(matrix)))\n\n normalized_inner_product = np.multiply(np.multiply(np.reshape(norm, [length, 1]), matrix),\n np.reshape(norm, [1, length]))\n return 2 - 2 * 
normalized_inner_product", "def _l2_normalize(x, axis=None, eps=1e-12):\n return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)", "def l2norm(X):\n norm = np.linalg.norm(X, axis=1, keepdims=True)\n return 1.0 * X / norm", "def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)", "def calculate_error_l2_norm(self, dY):\n solutions = []\n norm = 0.\n for mi in range(len(self._meshes)):\n for ei in range(len(self._meshes[mi].elements)):\n e = self._meshes[mi].elements[ei]\n # change this to gauss points:\n x_vals, w = p_roots(20)\n norm_e_squared = 0.\n for i, x in enumerate(x_vals):\n norm_e_squared += w[i] * \\\n self.get_sol_value(mi, ei, dY, x,\n count_lift=False)**2\n norm_e_squared *= e.jacobian\n norm += norm_e_squared\n return sqrt(norm)", "def l2norm(X, dim=-1, eps=1e-8):\r\n norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps\r\n X = torch.div(X, norm)\r\n return X", "def l2(v, axis=None):\n length = v.shape[0]\n return np.sqrt(np.sum(np.square(v), axis=axis) / length)", "def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)", "def normr(Mat):\n B = normalize(Mat, norm='l2', axis=1)\n return B", "def l2_norm(vec_or_matrix):\n if len(vec_or_matrix.shape) == 1:\n # linear vector\n return vec_or_matrix / np.linalg.norm(vec_or_matrix)\n elif len(vec_or_matrix.shape) == 2:\n return vec_or_matrix / np.linalg.norm(vec_or_matrix, axis=1, ord=2)[:, np.newaxis]\n else:\n raise ValueError('Wrong number of dimensions, 1 or 2 is supported, not %i.' % len(vec_or_matrix.shape))", "def l2_normalize(data, axis=-1, eps=1e-6):\n ret = data / (np.linalg.norm(data, axis=axis, keepdims=True) + eps)\n return ret", "def P2l_rec_norm(ells, cost):\n P22 = 3. * (1. - cost**2)\n P23 = 15. * cost * (1. - cost**2)\n P2l = np.zeros(len(ells))\n P2l[0] = 0.\n P2l[1] = 0.\n P2l[2] = P22\n P2l[3] = P23\n P2l_norm = np.copy(P2l)\n P2l_norm[2] *= P2l_norm_prefac(2)\n P2l_norm[3] *= P2l_norm_prefac(3)\n for ell in ells[4:]:\n # print ell, P2l[ell-1], P2l[ell-2]\n a = np.sqrt((4 * ell**2 - 1.) 
/ (ell**2 - 4))\n b = cost * P2l_norm[ell - 1]\n c = np.sqrt(((ell - 1.)**2 - 4) /\n (4 * (ell - 1.)**2 - 1)) * P2l_norm[ell - 2]\n # print a,b,c\n P2l_norm[ell] = a * (b - c)\n # print ell, P2l_norm[ell], P2l_norm_prefac(ell)\n P2l[ell] = P2l_norm[ell] / P2l_norm_prefac(ell)\n return P2l", "def sigma_norm2( self):\n return self._sigma2", "def l2_normalization(inputs, scaling=True):\n with tf.variable_scope('L2Normalization'):\n inputs_shape = inputs.get_shape()\n channel_shape = inputs_shape[-1:]\n # cal l2_norm on channel\n outputs = tf.nn.l2_normalize(inputs, 3, epsilon=1e-12)\n # scalling\n if scaling:\n # scale.shape == channel.shape\n scale = slim.variable('gamma', channel_shape, tf.float32, tf.constant_initializer(1.0))\n outputs = tf.multiply(outputs, scale)\n\n return outputs", "def fast_2_norm(A):\n v = np.random.rand(A.shape[1], 1)\n return la.norm(A.dot(v))", "def test_scale_features_L2_norm(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.0304526, 0.409996], [-0.999536, 0.816936], [-0.000485946, 0.40561]])\n\n # perform L2 normalization and check answer\n cdata.scale_features('L2 norm')\n self.assertTrue(allclose(cdata.data, answer))", "def norms(Z):\n return Z.view(Z.shape[0], -1).norm(dim=1)[:,None,None,None]", "def norm2_r(self, a: np.ndarray) -> float:\n return a.T @ a", "def normalizeL2(f):\r\n \r\n f=np.array(f)\r\n fsum=np.sum(np.abs(f))\r\n if fsum==0:\r\n fnorm=f\r\n else:\r\n fnorm=f/np.sqrt(np.sum(np.abs(f)**2))\r\n \r\n return fnorm", "def L2_func(x):\n return K.expand_dims(K.sqrt(K.sum(K.pow(x,2), axis=1)))", "def L2_normalize(xx):\r\n\tZx = compute_L2_normalization(xx)\r\n\treturn xx / np.sqrt(Zx[:, np.newaxis])", "def inner_product_to_L2_square(matrix):\n\n length = matrix.shape[0]\n squared_norm = np.reshape(np.diag(matrix), (length, 1))\n\n return squared_norm + np.transpose(squared_norm) - 2 * matrix", "def project_L2(w, l):\n return w * min(1, 1 / (l ** (1 / 2.0) * np.linalg.norm(w, 2)))", "def l2_distance(v1, v2):\n\treturn np.linalg.norm(np.array(v1) - np.array(v2))", "def l2_norm(input_x, epsilon=1e-12):\n input_x_norm = input_x/(tf.reduce_sum(input_x**2)**0.5 + epsilon)\n return input_x_norm", "def l2norm_1d(new, old):\n\tdiff = 0\n\tnx = len(new)\n\tfor i, n in enumerate(new):\n\t\tif n:\n\t\t\tdiff += ((n - old[i])/n)**2\n\tnorm = scipy.sqrt(diff/nx)\n\treturn norm", "def l2(x1, x2):\n return np.sqrt((x1 - x2)**2)", "def normalize_l2(x):\n return x / (npla.norm(x))", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def l2norm(X): \n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n a = norm.expand_as(X) + 1e-8\n X = torch.div(X, a) \n return X", "def two_norm(v):\n return math.sqrt(dot_product(v, v))", "def gradient_nD(stack):\n # Convert for 64-bit to avoid large number problems in squares.\n stack = np.copy(stack)\n stack = stack.astype(np.float64)\n sumsq = ndi.filters.sobel(stack, axis=0) ** 2\n for d in range(1, stack.ndim):\n sumsq = sumsq + (ndi.filters.sobel(stack, axis=d) ** 2)\n gradient = np.sqrt(sumsq)\n return gradient", "def norm2d(w_in):\n return nn.BatchNorm2d(num_features=w_in, 
eps=cfg.BN.EPS, momentum=cfg.BN.MOM)", "def compute_model_diff_squared_norm(model1: torch.nn.Module, model2: torch.nn.Module):\n tensor1 = list(model1.parameters())\n tensor2 = list(model2.parameters())\n norm = sum([torch.sum((tensor1[i] - tensor2[i]) ** 2) for i in range(len(tensor1))])\n\n return norm", "def model_norm(self, order=2) -> float:\n # L-n norm of model where we treat the model as a flat other\n return math.pow(sum([\n torch.pow(layer, order).sum().item()\n for layer in self.parameters\n ]), 1.0 / order)", "def normalize_l2norm(data,tol=0):\n data_sqrt=np.sqrt(np.square(data).sum(axis=1))\n data_sqrt.shape=(data_sqrt.shape[0],1)\n #tol=0#1e-8\n data=data/(data_sqrt+tol)\n return data", "def norm(self):", "def l2(weights, name=None):\n with ops.op_scope([weights], name, 'l2_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)", "def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):\n if filters is None:\n filters = shape_list(x)[-1]\n with tf.variable_scope(name, default_name=\"l2_norm\", values=[x], reuse=reuse):\n scale = tf.get_variable(\n \"l2_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"l2_norm_bias\", [filters], initializer=tf.zeros_initializer())\n epsilon, scale, bias = [cast_like(t, x)\n for t in [epsilon, scale, bias]]\n mean = tf.reduce_mean(x, axis=[-1], keepdims=True)\n l2norm = tf.reduce_sum(tf.square(x - mean), axis=[-1], keepdims=True)\n norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)\n return norm_x * scale + bias", "def L2_dists(x, y):\n #print(x.shape)\n #print(y.shape)\n dists = -2 * np.matmul(x, y.T)\n dists += np.sum(x**2)[np.newaxis]\n dists += np.sum(y**2)\n return np.sqrt(dists)", "def l2_reg_cost(cost, lambtha, weights, L, m):\n enorm = 0\n for i in range(1, L + 1):\n layer = 'W{}'.format(i)\n enorm += np.linalg.norm(weights[layer])\n return cost + (lambtha / (2 * m)) * enorm", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def norm(self):\n\t\treturn math.sqrt(self.norm2())", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)", "def norm2(self):\n return getattr(self, self.norm2_name)", "def getNormLaplacian(W):\n\td=[np.sum(row) for row in W]\n\tD=np.diag(d)\n\tL=D-W\n\t#Dn=D^(-1/2)\n\tDn=np.power(np.linalg.matrix_power(D,-1),0.5)\n\tLbar=np.dot(np.dot(Dn,L),Dn)\n\treturn Lbar", "def l2_regularization(W, reg_strength):\n # TODO: Copy from the previous assignment\n loss = reg_strength*np.sum(W*W)\n grad = 2*reg_strength*W\n return loss, grad", "def norm(self):\n mag_squared = self._sum_of_squares()\n return sqrt(mag_squared)", "def squared_norm(self) -> float:\n return self.__real**2 + self.__img[0]**2 + self.__img[1]**2 + self.__img[2]**2", "def difference_models_norm_2(model_1, model_2):\n \n tensor_1=list(model_1.parameters())\n tensor_2=list(model_2.parameters())\n \n norm=sum([torch.sum((tensor_1[i]-tensor_2[i])**2) \n for i in range(len(tensor_1))])\n \n return norm", "def _calc_r2(self):\n sse = np.sum((self.data.y - 
self.predict(self.data.x))**2)\n sst = np.sum((self.data.y - self.data.y.mean())**2)\n return (1. - sse/sst)", "def normsq(self):\n return sum(x**2 for x in self.data)", "def norm(imagestack, mean, std):\n \n new_im = (imagestack - mean)/std \n \n return new_im", "def tlbr_norm(self):\n ret = self.tlwh()\n ret[2:] += ret[:2] # tlbr\n ret[0] /= self.im_shape[1] # im_shape: [height, width]\n ret[1] /= self.im_shape[0]\n ret[2] /= self.im_shape[1]\n ret[3] /= self.im_shape[0]\n\n return ret", "def l2_reg_cost(cost, lambtha, weights, L, m):\n w_norm = 0\n for i in range(1, L + 1):\n w_norm += np.linalg.norm(weights['W' + str(i)])\n L2 = cost + (lambtha / (2 * m) * w_norm)\n return L2", "def l2distance(X, Z=None):\n\n if Z is None:\n n, d = X.shape\n s1 = np.sum(np.power(X, 2), axis=1).reshape(-1,1)\n D1 = -2 * np.dot(X, X.T) + repmat(s1, 1, n)\n D = D1 + repmat(s1.T, n, 1)\n np.fill_diagonal(D, 0)\n D = np.sqrt(np.maximum(D, 0))\n else:\n n, d = X.shape\n m, _ = Z.shape\n s1 = np.sum(np.power(X, 2), axis=1).reshape(-1,1)\n s2 = np.sum(np.power(Z, 2), axis=1).reshape(1,-1)\n D1 = -2 * np.dot(X, Z.T) + repmat(s1, 1, m)\n D = D1 + repmat(s2, n, 1)\n D = np.sqrt(np.maximum(D, 0))\n return D", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n while (L):\n index = \"W{}\".format(L)\n weight = weights[index]\n f += np.linalg.norm(weight)\n L -= 1\n return cost + lambtha / (2 * m) * f", "def norm(self):\n return sqrt(self.dot(self))", "def getNorm(self, norm=lambda l: (sum(map(lambda x: x ** 2, l))) ** (1 / 2)):\n return norm(self.components)", "def l2_regularization(cg, rate=0.01):\n W = VariableFilter(roles=[WEIGHT])(cg.variables)\n L2_cost = rate * l2_norm(W)\n\n return L2_cost", "def layer_norm(self, index, order=2) -> float:\n # L-n norms of layer where we treat each layer as a flat other\n return math.pow(torch.pow(self.parameters[index], order).sum().item(), 1.0 / order)", "def compute_Rg(L):\n\treturn np.sqrt(np.dot(L,L))", "def get_norm(self, l):\n return self._W.norm(l)", "def get_norm(self, l):\n return self._W.norm(l)", "def square_dist(X, X2=None, ls=1.):\n N, D = X.shape\n\n X = X / ls\n Xs = tf.reduce_sum(tf.square(X), axis=1)\n\n if X2 is None:\n dist = -2 * tf.matmul(X, X, transpose_b=True)\n dist += tf.reshape(Xs, (-1, 1)) + tf.reshape(Xs, (1, -1))\n return tf.clip_by_value(dist, 0., np.inf)\n\n N2, D2 = X2.shape\n if D != D2:\n raise ValueError('Dimension of X and X2 does not match.')\n\n X2 = X2 / ls\n X2s = tf.reduce_sum(tf.square(X2), axis=1)\n dist = -2 * tf.matmul(X, X2, transpose_b=True)\n dist += tf.reshape(Xs, (-1, 1)) + tf.reshape(X2s, (1, -1))\n return tf.clip_by_value(dist, 0., np.inf)", "def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))", "def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))", "def normsq(self):\n return abs(sum(self._ar * self._ar))", "def l2_reg_cost(cost, lambtha, weights, L, m):\n sumWeights = 0\n for i in range(1, L + 1):\n sumWeights += np.linalg.norm(weights['W' + str(i)])\n return cost + sumWeights * lambtha / (2 * m)", "def Norm(self):\n \n return sqrt(sum([sum(abs(x)**2) for x in self.__ObjList]))", "def norm_distance(self):\n graph_size = self.N + self.M\n return self.distance() / (1. * graph_size)" ]
[ "0.74098885", "0.6808609", "0.67808706", "0.67259216", "0.65520793", "0.65451896", "0.6460118", "0.64029825", "0.6392266", "0.62797666", "0.627647", "0.626877", "0.62292206", "0.6221982", "0.6214867", "0.62103754", "0.61965305", "0.61522037", "0.6128999", "0.6110056", "0.6074128", "0.6052297", "0.60296553", "0.6021897", "0.60171294", "0.5888251", "0.58822685", "0.58049625", "0.5804344", "0.5796051", "0.5784968", "0.5784316", "0.57818305", "0.5780563", "0.5775568", "0.5771007", "0.5754276", "0.57529444", "0.574019", "0.5738888", "0.57220197", "0.5706875", "0.56947917", "0.56229496", "0.5589051", "0.5576343", "0.556107", "0.5560761", "0.5553708", "0.5543217", "0.55342054", "0.5530855", "0.5519885", "0.54833406", "0.54800487", "0.54642594", "0.54642594", "0.54642594", "0.5454233", "0.54460615", "0.54442286", "0.54216623", "0.54165", "0.5406305", "0.53955823", "0.53784823", "0.53740174", "0.5368133", "0.5360109", "0.5359022", "0.5339425", "0.5325685", "0.53218764", "0.53174543", "0.5314816", "0.5304645", "0.5288519", "0.5283318", "0.5281344", "0.52803016", "0.52776283", "0.5276568", "0.52735907", "0.52517354", "0.52136445", "0.5182811", "0.5175183", "0.516102", "0.51528597", "0.51504815", "0.513328", "0.5125897", "0.5125897", "0.5123865", "0.51198125", "0.51198125", "0.51187766", "0.51139176", "0.51117694", "0.5109401" ]
0.83394605
0
Calculate the inner product of the two patterns, treating each pattern as a vector.
def inner_product(pattern_one, pattern_two):

    return np.sum(np.multiply(pattern_one, pattern_two))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def dotProduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))", "def dotproduct(v1, v2):\n\treturn sum(imap(operator.mul, v1, v2))", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def __mul__(self, other):\n return Vec2d(self.v[0] * other, self.v[1] * other)", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def inner_product_similarity(a: torch.Tensor, b: torch.Tensor, dim=1) -> torch.Tensor:\n outputs = (a * b).sum(dim=dim)\n return outputs", "def dot_product(v1, v2):\n return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]", "def dotproduct(x, y):\n return sum(imap(operator.mul, x, y))", "def dotProduct(vectorA, vectorB):\r\n product =0\r\n for i in range(len(vectorA)):\r\n product += eval(vectorA[i])*eval(vectorB[i])\r\n return product", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def _inner_product_a2(\n self, tangent_vec_a, tangent_vec_b, base_point, vertex_areas_bp\n ):\n laplacian_at_base_point = self._space.laplacian(base_point)\n return self.a2 * gs.sum(\n gs.einsum(\n \"...bi,...bi->...b\",\n laplacian_at_base_point(tangent_vec_a),\n laplacian_at_base_point(tangent_vec_b),\n )\n / vertex_areas_bp,\n axis=-1,\n )", "def _inner_product_c1(self, point_a, point_b, normals_bp, areas_bp):\n dna = self._space.normals(point_a) - normals_bp\n dnb = self._space.normals(point_b) - normals_bp\n return self.c1 * gs.sum(\n gs.einsum(\"...bi,...bi->...b\", dna, dnb) * areas_bp, axis=-1\n )", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def dot_product(v1, v2):\n return v1[0] * v2[0] + v1[1] * v2[1]", "def dot_product(v, w):\n return 
sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot4(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]", "def _mulVectors(X1,X2):\n _checkSize(X1,X2)\n return sum([ X1[i] * X2[i] for i in range(len(X1))])", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n inner_prod_mat = self.metric_matrix(base_point)\n aux = gs.einsum(\"...j,...jk->...k\", gs.conj(tangent_vec_a), inner_prod_mat)\n return gs.dot(aux, tangent_vec_b)", "def _qij_vec_inner(a: int, b: int, i: int, j: int):\n vec_dagger = _qij_vec_dagger(a, b)\n vec = _qij_vec(i, j)\n sum_result = FermionOperator()\n for idx, term in enumerate(vec):\n sum_result += term * vec_dagger[idx]\n return sum_result", "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def dot_product(first_vector, second_vector):\n first_unpacker = VectorUnpacker(first_vector)\n second_unpacker = VectorUnpacker(second_vector)\n if first_unpacker.unpacked_vector_length != second_unpacker.unpacked_vector_length:\n raise ApplicationError(\"Unpacked vector sizes are unequal\")\n\n # looks better than a 'map' one-liner to me\n value = 0\n for piece in zip(first_unpacker(), second_unpacker()):\n value += piece[0] * piece[1]\n\n return value", "def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))", "def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))", "def dot_product(v,w):\n return v[0] * w[0] + v[1] * w[1]", "def dotProduct(v1, v2):\n n1 = normalize(v1)\n n2 = normalize(v2)\n return n1[0] * n2[0] + n1[1] * n2[1] + n1[2] * n2[2]", "def __mul__(self,v2):\n\t\tif(isinstance(v2,Vect2D)):\n\t\t\treturn np.dot(self._vec,v2._vec)\n\t\telse:\n\t\t\treturn Vect2D(v2*self._vec)", "def product(self, x, y):\n return self( x.lift() * y.lift() )", "def dot(self,other):\n if len(self) == len(other):\n res = 0\n for a,b in zip(self,other):\n res += a*b\n return res\n else: \n raise ValueError(\"The length is not matched\")", "def dot(a, b):\n return sum([a[i]*b[i] for i in range(2)])", "def _inner_product_d1(\n self, one_forms_a, one_forms_b, one_forms_bp, areas_bp, inv_surface_metrics_bp\n ):\n one_forms_bp_t = gs.transpose(one_forms_bp, (0, 2, 1))\n\n one_forms_a_t = gs.transpose(one_forms_a, (0, 1, 3, 2))\n xa = one_forms_a_t - one_forms_bp_t\n\n xa_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xa, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xa),\n )\n\n one_forms_b_t = gs.transpose(one_forms_b, (0, 1, 3, 2))\n xb = one_forms_b_t - one_forms_bp_t\n xb_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xb, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xb),\n )\n\n return self.d1 * gs.sum(\n gs.einsum(\n \"...bii->...b\",\n gs.matmul(\n xa_0,\n gs.matmul(\n inv_surface_metrics_bp, gs.transpose(xb_0, axes=(0, 1, 3, 2))\n ),\n ),\n )\n * areas_bp\n )", "def dot( v1, v2 ):\n return sum( x*y for x,y in izip(v1,v2) )", "def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)", "def dot_product(v1, v2):\n #print(v1, v2)\n sum = 0\n\n for i in range(len(v1)):\n #print(v1[i], v2[i])\n sum += v1[i] * v2[i]\n return sum", "def dot_product(a, b):\n dp = 0.0\n for i, j in zip(a, b):\n dp += i * j\n return dp", "def dot(v,w):\n return sum(v_i * w_i for v_i,w_i in zip(v,w))", "def mul(Z,X,Y):", "def dot(v,w):\r\n return sum(v_i * w_i\r\n for 
v_i, w_i in zip(v, w))", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n args = {\n \"tangent_vec_a\": tangent_vec_a,\n \"tangent_vec_b\": tangent_vec_b,\n \"base_point\": base_point,\n }\n inner_products = self._iterate_over_factors(\"inner_product\", args)\n return sum(inner_products)", "def __mul__(self, other):\n x = self.x * other\n y = self.y * other\n return vec(x, y)", "def prod(self, x, y):\n return (self.basic_operation.reduce(x.original+y.original),\n self.operation1.prod(x.left, y.left),\n self.operation2.prod(x.right, y.right))", "def dotproduct(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return (first.x*other.x + first.y*other.y + first.z*other.z)", "def crossproduct(first, other=FreeCAD.Vector(0,0,1)):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.y*other.z - first.z*other.y, first.z*other.x - first.x*other.z, first.x*other.y - first.y*other.x)", "def inner(self, a: np.ndarray, b: np.ndarray) -> float:\n return a.T @ (self.mass @ b)", "def pair_product(x1, x2):\n return np.multiply(x1, x2)", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def innerprod_q2(q1, q2):\n T = q1.shape[1]\n val = sum(sum(q1 * q2)) / T\n\n return (val)", "def _inner_product_a1(self, ginvdga, ginvdgb, areas_bp):\n return self.a1 * gs.sum(\n gs.einsum(\"...bii->...b\", gs.matmul(ginvdga, ginvdgb)) * areas_bp,\n axis=-1,\n )", "def vec_dot_star(v1,v2):\r\n \r\n dot_star = v1[0]*(v2[1])-v1[1]*v2[0]\r\n return dot_star", "def vecDot(a, b):\n ret=0.0\n for i in range(len(a)):\n ret+=a[i]*b[i]\n return ret", "def sparseVectorDotProduct(v1, v2):\n # BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)\n return sum(v1[k]*v2[k] for k in v1 and v2)\n # END_YOUR_CODE", "def dot(v,w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v,w))", "def _prod_vectorized(M1, M2):\n sh1 = M1.shape\n sh2 = M2.shape\n assert len(sh1) >= 2\n assert len(sh2) >= 2\n assert sh1[-1] == sh2[-2]\n\n ndim1 = len(sh1)\n t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]\n return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *\n M2[..., np.newaxis, :], -3)", "def dot_product(vec_1:tuple, vec_2:tuple)->float:\n return vec_1[0] * vec_2[0] + vec_1[1] * vec_2[1]", "def same_side_product(p, q, a, b):\n return line_ccw(a, b, p) * line_ccw(a, b, q)", "def vdot(a, b):\n return np.vdot(a.ravel(), b.ravel())", "def __mul__(self,other):\n if type(other) is Vector:\n return(self.x*other.x + self.y*other.y + self.z*other.z)\n else:\n return(Vector(self.x*other,self.y*other,self.z*other))", "def _dot(a, b):\n return np.einsum('ijk,ikl->ijl', a, b)", "def sparseVectorDotProduct(a, b):\n # BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)\n ref = a if len(a)>len(b) else b\n return sum(a[key]*b[key] for key in ref)\n # END_YOUR_CODE", "def basic_geometric_product(obj1, obj2):\n def mul_table(b1, b2):\n return MV.base_mul_table[(b1, b2)]\n\n obj12 = bilinear_product(obj1 * obj2, mul_table)\n\n return obj12", "def __mul__(self, other):\n return sum(self._ar * other._ar)", "def get_dot_product(v1,v2):\n #sets default dot product\n dot_product = 0\n \n for key in v2:\n if key in v1:\n # updates the dot product if key is present in both vectors\n dot_product += v1[key]*v2[key]\n #returns final dot product\n return dot_product", "def dot(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]", "def _inner_product_a0(self, tangent_vec_a, 
tangent_vec_b, vertex_areas_bp):\n return self.a0 * gs.sum(\n vertex_areas_bp\n * gs.einsum(\"...bi,...bi->...b\", tangent_vec_a, tangent_vec_b),\n axis=-1,\n )", "def dot(self,v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def test_l2_metric_inner_product_vectorization(\n self,\n l2_metric_s2,\n times,\n n_landmark_sets,\n landmarks_a,\n landmarks_b,\n landmarks_c,\n ):\n landmarks_ab = l2_metric_s2.geodesic(landmarks_a, landmarks_b)\n landmarks_bc = l2_metric_s2.geodesic(landmarks_b, landmarks_c)\n landmarks_ab = landmarks_ab(times)\n landmarks_bc = landmarks_bc(times)\n\n tangent_vecs = l2_metric_s2.log(point=landmarks_bc, base_point=landmarks_ab)\n\n result = l2_metric_s2.inner_product(tangent_vecs, tangent_vecs, landmarks_ab)\n\n self.assertAllClose(gs.shape(result), (n_landmark_sets,))", "def complex_mul2d(a, b):\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def __mul__(self, other):\n\n newlist = [v for v in self.args]\n for i, v in enumerate(newlist):\n newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1])\n return Vector(newlist)", "def __mul__(self, other):\n if isinstance(other, Vector):\n return self.dot(other)\n else:\n raise TypeError(other)", "def __mul__(self, vs) -> List[Tuple[np.ndarray, np.ndarray]]:\n return sum([[*map(lambda u: (u, v), self.__elements)] for v in vs], [])", "def robust_outer_product(vec_1, vec_2):\n mantissa_1, exponents_1 = np.frexp(vec_1)\n mantissa_2, exponents_2 = np.frexp(vec_2)\n new_mantissas = mantissa_1[None, :] * mantissa_2[:, None]\n new_exponents = exponents_1[None, :] + exponents_2[:, None]\n return new_mantissas * np.exp2(new_exponents)", "def product(self, x, y):\n return self._cached_product(x.value, y.value)", "def outer_product(A, B): \n print(A)\n print(B)\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n \n if A_columns == 1 and B_rows == 1:\n \n outer_product = []\n\n # multi-line list comprehension for outer product\n [outer_product.append([A[i][0] * B[0][j] for j in range(B_columns)]) \n for i in range(A_rows)]\n\n return outer_product\n\n else:\n print(\"dimensions of vector do not match.\")", "def _inner_product_b1(self, ginvdga, ginvdgb, areas_bp):\n return self.b1 * gs.sum(\n gs.einsum(\"...bii->...b\", ginvdga)\n * gs.einsum(\"...bii->...b\", ginvdgb)\n * areas_bp,\n axis=-1,\n )", "def product(self):\n return self.right[self.i:] + self.left[:self.i], self.left[self.i:] + self.right[:self.i]", "def dot_product(u, v):\n sum_of_products = 0\n if u!= None:\n if v!= None:\n for combo in zip(u, v):\n sum_of_products += (combo[0] * combo[1])\n return sum_of_products", "def dot(a, b):\n return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]", "def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.", "def proyeccion(v1, v2):\n prod = np.dot(v1, v2)\n N = np.zeros((len(prod), len(v2)))\n\n for i in range(len(N)):\n N[i, :] = prod[i] * v2\n return N", "def dot_product(A, B):\n A_rows = len(A)\n A_columns = 
len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n\n if (A_columns == B_rows) and (A_rows == 1 and B_columns == 1):\n\n dot_product = []\n \n dot_product.append(sum([A[0][i]*B[i][0] for i in range(A_columns)]))\n\n return float(dot_product)\n \n else:\n print(\"dimensions of vector do not match.\")", "def vec_product(vec1: List[int], vec2: List[int]) -> int:\n return sum(map(lambda v1, v2: v1 * v2, vec1, vec2))", "def dot(v, w):\n\treturn sum(v_i * w_i for v_i, w_i in zip(v, w))", "def __mul__(self, other):\r\n return self.prod(other)", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def dot(v, w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v, w))", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n to_squeeze = False\n if tangent_vec_a.ndim == 2 and tangent_vec_b.ndim == 2:\n to_squeeze = True\n if tangent_vec_a.ndim == 2:\n tangent_vec_a = gs.expand_dims(tangent_vec_a, axis=0)\n if tangent_vec_b.ndim == 2:\n tangent_vec_b = gs.expand_dims(tangent_vec_b, axis=0)\n\n point_a = base_point + tangent_vec_a\n point_b = base_point + tangent_vec_b\n inner_prod = gs.zeros((gs.maximum(len(tangent_vec_a), len(tangent_vec_b)), 1))\n if self.a0 > 0 or self.a2 > 0:\n vertex_areas_bp = self._space.vertex_areas(base_point)\n if self.a0 > 0:\n inner_prod += self._inner_product_a0(\n tangent_vec_a, tangent_vec_b, vertex_areas_bp=vertex_areas_bp\n )\n if self.a2 > 0:\n inner_prod += self._inner_product_a2(\n tangent_vec_a,\n tangent_vec_b,\n base_point=base_point,\n vertex_areas_bp=vertex_areas_bp,\n )\n if self.a1 > 0 or self.b1 > 0 or self.c1 > 0 or self.b1 > 0:\n one_forms_bp = self._space.surface_one_forms(base_point)\n surface_metrics_bp = self._space._surface_metric_matrices_from_one_forms(\n one_forms_bp\n )\n normals_bp = self._space.normals(base_point)\n areas_bp = gs.sqrt(gs.linalg.det(surface_metrics_bp))\n\n if self.c1 > 0:\n inner_prod += self._inner_product_c1(\n point_a, point_b, normals_bp, areas_bp\n )\n if self.d1 > 0 or self.b1 > 0 or self.a1 > 0:\n ginv_bp = gs.linalg.inv(surface_metrics_bp)\n one_forms_a = self._space.surface_one_forms(point_a)\n one_forms_b = self._space.surface_one_forms(point_b)\n if self.d1 > 0:\n inner_prod += self._inner_product_d1(\n one_forms_a,\n one_forms_b,\n one_forms_bp,\n areas_bp=areas_bp,\n inv_surface_metrics_bp=ginv_bp,\n )\n\n if self.b1 > 0 or self.a1 > 0:\n dga = (\n gs.matmul(\n one_forms_a, gs.transpose(one_forms_a, axes=(0, 1, 3, 2))\n )\n - surface_metrics_bp\n )\n dgb = (\n gs.matmul(\n one_forms_b, gs.transpose(one_forms_b, axes=(0, 1, 3, 2))\n )\n - surface_metrics_bp\n )\n ginvdga = gs.matmul(ginv_bp, dga)\n ginvdgb = gs.matmul(ginv_bp, dgb)\n inner_prod += self._inner_product_a1(ginvdga, ginvdgb, areas_bp)\n inner_prod += self._inner_product_b1(ginvdga, ginvdgb, areas_bp)\n return gs.squeeze(inner_prod, axis=0) if to_squeeze else inner_prod", "def multiply_vector(self, dv, spm):\n product = []\n for a, b in zip(dv, spm):\n product.append(a * b)\n return product" ]
[ "0.6906152", "0.6847398", "0.68069786", "0.67931485", "0.6750435", "0.6661231", "0.665229", "0.6584153", "0.6543558", "0.6477087", "0.645507", "0.64223343", "0.64149445", "0.64062977", "0.64052427", "0.6394551", "0.6389129", "0.6387153", "0.6369647", "0.6364393", "0.6364216", "0.63563746", "0.6349533", "0.6338918", "0.6337371", "0.6333778", "0.63327885", "0.6304801", "0.6301042", "0.6294226", "0.62937057", "0.6278996", "0.6254354", "0.6249698", "0.62419444", "0.62408894", "0.62312293", "0.6223513", "0.6194338", "0.61871403", "0.615942", "0.6153975", "0.6143662", "0.6125205", "0.61245793", "0.61241776", "0.6123317", "0.6119969", "0.6117163", "0.6109324", "0.61043894", "0.61042815", "0.6097835", "0.60862917", "0.6082641", "0.60778964", "0.607604", "0.60723877", "0.6049616", "0.60438526", "0.6033628", "0.6028014", "0.6025679", "0.60188544", "0.60178226", "0.6011806", "0.6011655", "0.6008298", "0.59859014", "0.59810317", "0.5980731", "0.59763557", "0.59692574", "0.5964098", "0.59478", "0.5943066", "0.5943066", "0.5943066", "0.5943066", "0.5943066", "0.5942895", "0.5942147", "0.5942145", "0.59349096", "0.59289217", "0.5925027", "0.59164935", "0.59005314", "0.58936363", "0.58843505", "0.5882821", "0.5874659", "0.58651155", "0.58648306", "0.5863016", "0.5861122", "0.5861097", "0.5857336", "0.5848796", "0.58481383" ]
0.83498394
0
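A minimal usage sketch of the inner_product function from the record above, assuming both patterns are NumPy arrays of the same shape; the np.vdot comparison on the flattened arrays is only an illustrative equivalence added here, not taken from the original source.

import numpy as np

def inner_product(pattern_one, pattern_two):
    # Elementwise multiply then sum over every entry: the two patterns are
    # effectively treated as flattened vectors.
    return np.sum(np.multiply(pattern_one, pattern_two))

# Two small 2x2 "patterns" chosen purely for illustration.
a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[5.0, 6.0], [7.0, 8.0]])

print(inner_product(a, b))            # 70.0
print(np.vdot(a.ravel(), b.ravel()))  # 70.0 -- same value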
Calculate the inner product of each pair of patterns drawn from batch one and batch two. Notice that the pattern_stack_one variable represents the patterns along dimension zero, while the pattern_stack_two variable represents the patterns along dimension one in the final distance matrix.
def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):

    """
    Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the
    other half.
    """
    holder = np.zeros((pattern_num_one, pattern_num_two))
    for l in range(pattern_num_one):
        for m in range(pattern_num_two):
            holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))

    return holder
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inner_product(pattern_one, pattern_two):\n\n return np.sum(np.multiply(pattern_one, pattern_two))", "def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)", "def generate_pattern_grid(words1, words2):\n # Convert word lists to integer arrays\n w1, w2 = (\n np.array([[ord(c) for c in w] for w in words], dtype=np.uint8)\n for words in (words1, words2)\n )\n\n if len(w1) == 0 or len(w2) == 0:\n return np.zeros((len(w1), len(w2)), dtype=np.uint8)\n\n # equality_grid[a, b, i, j] represents whether the ith letter\n # of words1[a] equals the jth letter of words2[b]\n equality_grid = np.zeros((len(w1), len(w2), 5, 5), dtype=bool)\n for i, j in it.product(range(5), range(5)):\n equality_grid[:, :, i, j] = np.equal.outer(w1[:, i], w2[:, j])\n\n patterns = np.zeros((len(w1), len(w2)), dtype=np.uint8)\n three_pows = (3**np.arange(5)).astype(np.uint8)\n for i, tp in enumerate(three_pows):\n # This accounts for yellow squares\n patterns[:, :] += tp * equality_grid[:, :, i, :].any(2)\n # This accounts for green squares\n patterns[:, :] += tp * equality_grid[:, :, i, i]\n\n return patterns", "def pair_product(x1, x2):\n return np.multiply(x1, x2)", "def dim_mul(dims1, dims2):\n return (\n dims1[0] + dims2[0],\n dims1[1] + dims2[1],\n dims1[2] + dims2[2],\n dims1[3] + dims2[3],\n dims1[4] + dims2[4],\n dims1[5] + dims2[5],\n dims1[6] + dims2[6],\n )", "def axis_element_wise_multiplication(t1, t2, which_axis):\n # assert len(K.int_shape(t1)) == len(K.int_shape(t2)) + 1, \"rank(t1) should be rank(t2) + 1\"\n slices = tf.unstack(t1, axis=which_axis)\n # assert K.int_shape(slices[0]) == K.int_shape(t2), \"Slices of t1 were not the same shape as t2\"\n multiplies = []\n for s in slices:\n multiplies.append(t2 * s)\n return tf.stack(multiplies, axis=2)", "def _prod_vectorized(M1, M2):\n sh1 = M1.shape\n sh2 = M2.shape\n assert len(sh1) >= 2\n assert len(sh2) >= 2\n assert sh1[-1] == sh2[-2]\n\n ndim1 = len(sh1)\n t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]\n return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *\n M2[..., np.newaxis, :], -3)", "def double_chop_pairs(\n x1, y1, z1, w1, cell1, x2, y2, z2, w2, indx2, rbins_squared, result):\n start = cuda.grid(1)\n stride = cuda.gridsize(1)\n\n n1 = x1.shape[0]\n nbins = rbins_squared.shape[0]\n\n for i in range(start, n1, stride):\n px = x1[i]\n py = y1[i]\n pz = z1[i]\n pw = w1[i]\n\n cell1_i = cell1[i]\n first = indx2[cell1_i]\n last = indx2[cell1_i+1]\n\n for j in range(first, last):\n qx = x2[j]\n qy = y2[j]\n qz = z2[j]\n qw = w2[j]\n\n dx = px-qx\n dy = py-qy\n dz = pz-qz\n wprod = pw*qw\n dsq = dx*dx + dy*dy + dz*dz\n\n k = nbins-1\n while dsq <= rbins_squared[k]:\n cuda.atomic.add(result, k-1, wprod)\n k = k-1\n if k <= 0:\n break", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def mult(m1, m2):\n assert np.shape(m1) == (2, 3)\n assert np.shape(m2) == (2, 3)\n\n m1_temp = 
np.vstack((m1, [0, 0, 1]))\n m2_temp = np.vstack((m2, [0, 0, 1]))\n result = m1_temp * m2_temp\n\n return result[:2, :]", "def _inner_product_d1(\n self, one_forms_a, one_forms_b, one_forms_bp, areas_bp, inv_surface_metrics_bp\n ):\n one_forms_bp_t = gs.transpose(one_forms_bp, (0, 2, 1))\n\n one_forms_a_t = gs.transpose(one_forms_a, (0, 1, 3, 2))\n xa = one_forms_a_t - one_forms_bp_t\n\n xa_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xa, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xa),\n )\n\n one_forms_b_t = gs.transpose(one_forms_b, (0, 1, 3, 2))\n xb = one_forms_b_t - one_forms_bp_t\n xb_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xb, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xb),\n )\n\n return self.d1 * gs.sum(\n gs.einsum(\n \"...bii->...b\",\n gs.matmul(\n xa_0,\n gs.matmul(\n inv_surface_metrics_bp, gs.transpose(xb_0, axes=(0, 1, 3, 2))\n ),\n ),\n )\n * areas_bp\n )", "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def inner_product_similarity(a: torch.Tensor, b: torch.Tensor, dim=1) -> torch.Tensor:\n outputs = (a * b).sum(dim=dim)\n return outputs", "def gradient_merge_arrays(cls, image_one, image_two):\n if image_one.shape != image_two.shape:\n raise AttributeError(\"shapes do not match: {} vs {}\".format(image_one.shape, image_two.shape))\n height = image_one.shape[0]\n vector_one = numpy.array([1.0 - float(i + 1) / (height + 1) for i in range(height)])\n vector_two = numpy.array([float(i + 1) / (height + 1) for i in range(height)])\n return (image_one * vector_one[:, numpy.newaxis]) + (image_two * vector_two[:, numpy.newaxis])", "def mul(self,mat1,mat2):\n if(isinstance(mat2,int)==True):\n result = [[mat1[i][j] * mat2 for j in range(len(mat1[0]))] for i in range(len(mat1))]\n self.out = result\n return self.out\n elif(len(mat1[0])==len(mat2)):\n result = [[sum(a*b for a,b in zip(i,j)) for j in zip(*mat2)] for i in mat1]\n self.out = result\n return self.out", "def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]", "def dotproduct(x, y):\n return sum(imap(operator.mul, x, y))", "def _layerwise_dot_product(x_s, y_s):\n return [torch.sum(x * y).item() for x, y in zip(x_s, y_s)]", "def pairwise_distances(a, b, p=2):\n squeezed = False\n if len(a.shape) == 2 and len(b.shape) == 2:\n a = tf.expand_dims(a,0) #[np.newaxis, :, :]\n b = tf.expand_dims(a,0) #b[np.newaxis, :, :]\n squeezed = True\n \n ret = tf.reduce_sum(tf.keras.backend.pow(tf.math.abs(tf.expand_dims(a,2) - tf.expand_dims(b,1)), p), 3)\n #[:, :, np.newaxis, :], [:, np.newaxis, :, :]\n if squeezed:\n ret = tf.squeeze(ret)\n\n return ret", "def pair_eval(self, X, Y):\n d1 = self.d1\n K1 = self.k1.pair_eval(X[:, 
:d1], Y[:, :d1])\n K2 = self.k2.pair_eval(X[:, d1:], Y[:, d1:])\n return K1 * K2", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def product_2(m1, m2):\r\n return make_mono_admissible_2(list(m1) + list(m2))", "def innerprod_q2(q1, q2):\n T = q1.shape[1]\n val = sum(sum(q1 * q2)) / T\n\n return (val)", "def dist_matrix(self, group1, group2):\n \n tmps = []\n for i in group2:\n tmps.append([])\n for j in group1:\n mi, label = self.distance(i, j)\n tmps[-1].append(mi)\n return tmps", "def _interleave_ecdfs(\n x1: np.ndarray,\n y1: np.ndarray,\n x2: np.ndarray,\n y2: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n assert len(x1.shape) == len(x2.shape) == 1\n assert x1.shape == y1.shape\n assert x2.shape == y2.shape\n\n x = np.sort(np.concatenate([x1, x2]))\n y1 = np.insert(y1, 0, [0])\n y2 = np.insert(y2, 0, [0])\n return x, y1[np.searchsorted(x1, x, side='right')], y2[np.searchsorted(x2, x, side='right')]", "def alignPairShapes(s1,s2,weights):\n\n\n s1=np.asarray(s1)\n s2=np.asarray(s2)\n \n x1k=s1[:,0]\n y1k=s1[:,1]\n x2k=s2[:,0]\n y2k=s2[:,1]\n\n X1=sum(x1k*weights) \n X2=sum(x2k*weights)\n\n Y1=sum(y1k*weights)\n Y2=sum(y2k*weights)\n\n Z=sum(weights*(pow(x2k,2)+pow(y2k,2)))\n\n W=sum(weights)\n\n C1=sum(weights*(x1k*x2k+y1k*y2k))\n\n C2=sum(weights*(y1k*x2k-x1k*y2k))\n \n a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]])\n b=np.asarray([X1,Y1,C1,C2])\n\n x=np.linalg.solve(a,b)\n\n ax=x[0]\n ay=x[1]\n tx=x[2]\n ty=x[3]\n return ax,ay,tx,ty", "def strassen(m1, m2):\n \n if ((m1.shape[0] % 2 == 0) or (m1.shape[0] == 1)):\n n = m1.shape[0] \n else:\n n = m1.shape[0] + 1\n result = np.zeros((n, n), dtype = int)\n \n if (n == 1):\n result[0][0] = m1[0][0] * m2[0][0]\n else:\n new = n//2\n \n a11, a12, a21, a22 = m1[:new, :new], m1[new:, :new], m1[:new, new:], m1[new:, new:]\n b11, b12, b21, b22 = m2[:new, :new], m2[new:, :new], m2[:new, new:], m2[new:, new:]\n \n p1 = strassen(a11, b12 - b22)\n p2 = strassen(a11 + a12, b22)\n p3 = strassen(a21 + a22, b11)\n p4 = strassen(a22, b21 - b11)\n p5 = strassen(a11 + a22, b11 + b22)\n p6 = strassen(a12 - a22, b21 + b22)\n p7 = strassen(a11 - a21, b11 + b12)\n \n result[:new, :new] = p5 + p4 - p2 + p6\n result[new:, :new] = p1 + p2\n result[:new, new:] = p3 + p4 \n result[new:, new:] = p5 + p1 - p3 - p7\n \n return result", "def matrix_mult(m1, m2):\n\ttemp = []\n\tfor i in range(len(m1)):\n\t\te = []\n\t\tfor j in range(len(m2[0])):\n\t\t\te.append(row_times_column(m1,i,m2,j))\n\t\ttemp.append(e)\n\treturn temp", "def solve_part_two(self):\n return self.outputs[0] * self.outputs[1] * self.outputs[2]", "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def multiplicand_2(p):\n m2 = cddr(p) # (m2 m3 ...)\n rests = cdr(m2) # (m3...)\n if isNull(rests):\n return car(m2)\n else:\n restp = convertToPythonList(cdr(rests))\n return make_product_2(car(m2), car(rests), *restp)", "def _mulVectors(X1,X2):\n _checkSize(X1,X2)\n return sum([ X1[i] * X2[i] for i in range(len(X1))])", "def complex_mul2d(a, b):\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + 
op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def __call__(self, sample_1, sample_2, alphas, ret_matrix=False):\n sample_12 = torch.cat((sample_1, sample_2), 0)\n distances = pdist(sample_12, sample_12, norm=2)\n\n kernels = None\n for alpha in alphas:\n kernels_a = torch.exp(- alpha * distances ** 2)\n if kernels is None:\n kernels = kernels_a\n else:\n kernels = kernels + kernels_a\n\n k_1 = kernels[:self.n_1, :self.n_1]\n k_2 = kernels[self.n_1:, self.n_1:]\n k_12 = kernels[:self.n_1, self.n_1:]\n\n mmd = (2 * self.a01 * k_12.sum() +\n self.a00 * (k_1.sum() - torch.trace(k_1)) +\n self.a11 * (k_2.sum() - torch.trace(k_2)))\n if ret_matrix:\n return mmd, kernels\n else:\n return mmd", "def product_map(xs1, xs2):\n return jax.vmap(lambda x1: jax.vmap(lambda x2: pair_product(x1, x2))(xs2))(xs1)", "def Multiply(M1,M2):\r\n M3=[]\r\n w=0\r\n while w<len(M2[0]):\r\n tap=[]\r\n t=0\r\n while t<len(M2):\r\n tap.append(M2[t][w])\r\n t=t+1\r\n M3.append(tap)\r\n w=w+1\r\n M=[]\r\n # Multiplying matrices\r\n k=0\r\n sums=0\r\n while k<len(M1):\r\n j=0\r\n mpy=[]\r\n while j<len(M3):\r\n p=0\r\n sums=0\r\n while p<len(M3[j]):\r\n temp = (M1[k][p])*(M3[j][p])\r\n sums=sums+temp\r\n p=p+1\r\n mpy.append(sums)\r\n j=j+1\r\n M.append(mpy)\r\n k=k+1\r\n return M", "def mat_mul(mat1, mat2):\n\n rows1 = len(mat1)\n cols1 = len(mat1[0])\n rows2 = len(mat2)\n cols2 = len(mat2[0])\n\n if cols1 != rows2:\n return None\n else:\n new_matrix = []\n for x in range(rows1):\n aux_row = []\n for y in range(cols2):\n aux_sum = []\n for z in range(cols1):\n aux_sum.append(mat1[x][z] * mat2[z][y])\n aux_row.append(sum(aux_sum))\n new_matrix.append(aux_row)\n\n return new_matrix", "def unfolded_product(m1, m2, coords = None, mx = None):\n if coords is None:\n mx = np.max( np.array([np.max(np.abs(m1.coords), axis = 0), \n np.max(np.abs(m2.coords), axis = 0)]), axis = 0)\n\n coords = lattice_coords(mx)\n else:\n if mx is not None:\n coords = lattice_coords(mx)\n #print(coords, coords[int(len(coords)/2)])\n M1 = m1.tofull(m1,coords,coords)\n M2 = m2.tofull(m2,coords,coords)\n \n RET = np.dot(M1, M2)\n rnx = m1.blocks.shape[1]\n rny = m2.blocks.shape[2]\n rc = int(len(coords)/2)\n retblocks = RET[rnx*rc:rnx*(rc+1),:].reshape(rnx, coords.shape[0], rny).swapaxes(0,1)\n \n ret = tmat()\n ret.load_nparray(retblocks, coords)\n return ret", "def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = 
matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11", "def squared_distance_calculator(position1, position2):\r\n difference_vector = position2 - position1\r\n return np.dot(difference_vector, difference_vector)", "def multiply(A, B, *args, **kwargs):\n dim=A.__len__()\n assert(dim==B.__len__())\n C=[]\n for ii in range(dim):\n shape=(A[ii].shape[0]*B[ii].shape[0], A[ii].shape[1])\n val=np.empty(shape)\n for iimn, (mm, nn) in enumerate(itertools.product(list(range(A[ii].shape[0])), list(range(B[ii].shape[0])))):\n val[iimn] = A[ii][mm]*B[ii][nn]\n C.append(val)\n return C", "def mul(Z,X,Y):", "def _mps_AA(self, A1, A2):\n Dl, d1, _ = A1.shape\n _, d2, Dr = A2.shape\n return np.reshape(np.tensordot(A1, A2, axes=(2, 0)), [Dl, d1 * d2, Dr])", "def brute_multiply(x, y):\n \n n = x.shape[0]\n res = np.zeros(x.shape)\n \n for i in range(n):\n for j in range(n):\n for k in range(n):\n res[i, j] += x[i, k] * y[k, j]\n \n return res", "def compute_surface_distances(mask1, mask2, voxel_dimensions=1):\n structuring_el_size = tuple(3 for _ in mask1.shape)\n grad1 = morphological_gradient(mask1.astype(int), size=structuring_el_size)\n grad2 = morphological_gradient(mask2.astype(int), size=structuring_el_size)\n\n if not _is_iterable(voxel_dimensions):\n voxel_dimensions = [voxel_dimensions for _ in mask1.shape]\n voxel_dimensions = np.array(voxel_dimensions).reshape(1, -1)\n\n nonzeros_1 = np.array(np.nonzero(grad1)).T * voxel_dimensions\n nonzeros_2 = np.array(np.nonzero(grad2)).T * voxel_dimensions\n return np.sort(_compute_set_distances(nonzeros_1, nonzeros_2))", "def relate_pattern(a, b, pattern, **kwargs):\n return lib.relate_pattern(a, b, pattern, **kwargs)", "def _inner_product_c1(self, point_a, point_b, normals_bp, areas_bp):\n dna = self._space.normals(point_a) - normals_bp\n dnb = self._space.normals(point_b) - normals_bp\n return self.c1 * gs.sum(\n gs.einsum(\"...bi,...bi->...b\", dna, dnb) * areas_bp, axis=-1\n )", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.", "def complex_mul(x1, x2):\n assert x1.size(-1) == 2 and x2.size(-1) == 2\n\n res = torch.stack(\n (x1[..., 0]*x2[..., 0]-x1[..., 1]*x2[..., 1],\n x1[..., 0]*x2[..., 1] + x1[..., 1]*x2[..., 0]), -1)\n\n return res", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n 
column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def squared_dist(x1: np.ndarray, x2: np.ndarray) -> np.ndarray:\n return (\n np.sum(x1 ** 2, 1).reshape(-1, 1) +\n np.sum(x2 ** 2, 1) -\n 2 * np.dot(x1, x2.T)\n )", "def product(self):\n return self.right[self.i:] + self.left[:self.i], self.left[self.i:] + self.right[:self.i]", "def mat_mul(mat1, mat2):\n\n if len(mat1[0]) == len(mat2):\n\n mat2 = matrix_transpose(mat2)\n response = []\n\n for row in range(len(mat1)):\n response.append(\n [\n sum(dot_product(mat1[row], mat2[column]))\n for column in range(len(mat2))\n ]\n )\n\n return response\n\n else:\n return None", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def dot_product(a, b):\n dp = 0.0\n for i, j in zip(a, b):\n dp += i * j\n return dp", "def hadmardProduct(self, a,b):\n\t\tnumRows = len(a)\n\t\tnumCols = len(a[0])\n\t\n\t\treturn [[a[j][i] * b[j][i] for i in range(numCols)] for j in range(numRows)]", "def __surface_distances(result, reference, voxelspacing=None, connectivity=1):\n result = np.atleast_1d(result.astype(np.bool))\n reference = np.atleast_1d(reference.astype(np.bool))\n if voxelspacing is not None:\n voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)\n voxelspacing = np.asarray(voxelspacing, dtype=np.float64)\n if not voxelspacing.flags.contiguous:\n voxelspacing = voxelspacing.copy()\n \n # binary structure\n footprint = generate_binary_structure(result.ndim, connectivity)\n \n # test for emptiness\n if 0 == np.count_nonzero(result): \n raise RuntimeError('The first supplied array does not contain any binary object.')\n if 0 == np.count_nonzero(reference): \n raise RuntimeError('The second supplied array does not contain any binary object.') \n \n # extract only 1-pixel border line of objects\n result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)\n # print(result_border+0)\n # result_border = (result_border+0).astype(np.float32)\n # imsave(\"./test_comp1/\" +\"truth.jpg\", result_border)\n reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)\n # print(reference_border)\n # reference_border = (reference_border + 0).astype(np.float32)\n # imsave(\"./test_comp1/\" +\"truth.jpg\", reference_border)\n # compute average surface distance \n # Note: scipys distance transform is calculated only inside the borders of the\n # foreground objects, therefore the input has to be reversed\n dt = distance_transform_edt(~reference_border, sampling=voxelspacing)\n # print(dt)\n reference_border = (reference_border + 0).astype(np.float32)\n # imsave(\"./test_comp1/\" +\"truth.jpg\", reference_border)\n sds = dt[result_border]\n \n return sds", "def dotProduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def products(q_1: Qs, q_2: Qs, kind: str = \"\", reverse: bool = False) -> Qs:\n\n q_1_copy = 
deepcopy(q_1)\n q_2_copy = deepcopy(q_2)\n qs_left, qs_right = Qs(), Qs()\n\n # Diagonalize if need be.\n if ((q_1.rows == q_2.rows) and (q_1.columns == q_2.columns)) or (\n \"scalar_q\" in [q_1.qs_type, q_2.qs_type]\n ):\n\n if q_1.columns == 1:\n qs_right = q_2_copy\n qs_left = diagonal(q_1_copy, qs_right.rows)\n\n elif q_2.rows == 1:\n qs_left = q_1_copy\n qs_right = diagonal(q_2_copy, qs_left.columns)\n\n else:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n # Typical matrix multiplication criteria.\n elif q_1.columns == q_2.rows:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n else:\n print(\n \"Oops, cannot multiply series with row/column dimensions of {}/{} to {}/{}\".format(\n q_1.rows, q_1.columns, q_2.rows, q_2.columns\n )\n )\n\n # Operator products need to be transposed.\n operator_flag = False\n if qs_left in [\"op\", \"operator\"] and qs_right in [\"op\", \"operator\"]:\n operator_flag = True\n\n outer_row_max = qs_left.rows\n outer_column_max = qs_right.columns\n shared_inner_max = qs_left.columns\n projector_flag = (\n (shared_inner_max == 1) and (outer_row_max > 1) and (outer_column_max > 1)\n )\n\n result = [\n [q0(q_type=\"\") for _i in range(outer_column_max)]\n for _j in range(outer_row_max)\n ]\n\n for outer_row in range(outer_row_max):\n for outer_column in range(outer_column_max):\n for shared_inner in range(shared_inner_max):\n\n # For projection operators.\n left_index = outer_row\n right_index = outer_column\n\n if outer_row_max >= 1 and shared_inner_max > 1:\n left_index = outer_row + shared_inner * outer_row_max\n\n if outer_column_max >= 1 and shared_inner_max > 1:\n right_index = shared_inner + outer_column * shared_inner_max\n\n result[outer_row][outer_column] = add(result[outer_row][outer_column],\n product(qs_left.qs[left_index],\n qs_right.qs[right_index], kind=kind, reverse=reverse\n )\n )\n\n # Flatten the list.\n new_qs = [item for sublist in result for item in sublist]\n new_states = Qs(new_qs, rows=outer_row_max, columns=outer_column_max)\n\n if projector_flag or operator_flag:\n return transpose(new_states)\n\n else:\n return new_states", "def test_l2_metric_inner_product_vectorization(\n self,\n l2_metric_s2,\n times,\n n_landmark_sets,\n landmarks_a,\n landmarks_b,\n landmarks_c,\n ):\n landmarks_ab = l2_metric_s2.geodesic(landmarks_a, landmarks_b)\n landmarks_bc = l2_metric_s2.geodesic(landmarks_b, landmarks_c)\n landmarks_ab = landmarks_ab(times)\n landmarks_bc = landmarks_bc(times)\n\n tangent_vecs = l2_metric_s2.log(point=landmarks_bc, base_point=landmarks_ab)\n\n result = l2_metric_s2.inner_product(tangent_vecs, tangent_vecs, landmarks_ab)\n\n self.assertAllClose(gs.shape(result), (n_landmark_sets,))", "def product(self, x, y):\n return self( x.lift() * y.lift() )", "def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n 
x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t", "def apply_feature_matching(desc1: np.ndarray, desc2: np.ndarray,\n match_calculator: Callable[[list, list], float]) -> list:\n\n # Check descriptors dimensions are 2\n assert desc1.ndim == 2, \"Descriptor 1 shape is not 2\"\n assert desc2.ndim == 2, \"Descriptor 2 shape is not 2\"\n\n # Check that the two features have the same descriptor type\n assert desc1.shape[1] == desc2.shape[1], \"Descriptors shapes are not equal\"\n\n # If there is not key points in any of the images\n if desc1.shape[0] == 0 or desc2.shape[0] == 0:\n return []\n\n # number of key points in each image\n num_key_points1 = desc1.shape[0]\n num_key_points2 = desc2.shape[0]\n\n # List to store the matches scores\n matches = []\n\n # Loop over each key point in image1\n # We need to calculate similarity with each key point in image2\n for kp1 in range(num_key_points1):\n # Initial variables which will be updated in the loop\n distance = -np.inf\n y_index = -1\n\n # Loop over each key point in image2\n for kp2 in range(num_key_points2):\n\n # Match features between the 2 vectors\n value = match_calculator(desc1[kp1], desc2[kp2])\n\n # SSD values examples: (50, 200, 70), we need to minimize SSD (Choose 50)\n # In case of SSD matching: (value is returned as a \"negative\" number) (-50, -200, -70)\n # So we compare it with -np.inf. (The sorting will be reversed later)\n\n # NCC values examples: (0.58, 0.87, 0.42), we need to maximize NCC (Choose 0.87)\n # In case of NCC matching: (value is returned as a \"positive\" number) (-58, -0.87, -0.42)\n # So we compare it with -np.inf. (The sorting will be reversed later)\n\n if value > distance:\n distance = value\n y_index = kp2\n\n # Create a cv2.DMatch object for each match and set attributes:\n # queryIdx: The index of the feature in the first image\n # trainIdx: The index of the feature in the second image\n # distance: The distance between the two features\n cur = cv2.DMatch()\n cur.queryIdx = kp1\n cur.trainIdx = y_index\n cur.distance = distance\n matches.append(cur)\n\n return matches", "def euclidean_squared_distance(input1, input2):\n m, n = input1.size(0), input2.size(0)\n mat1 = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n)\n mat2 = torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat = mat1 + mat2\n distmat.addmm_(input1, input2.t(), beta=1, alpha=-2)\n return distmat", "def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))", "def dist_squared (a, b):\n return sum(map(lambda (x,y): (x-y)**2, zip(a, b)))", "def times(self, other):\r\n result = TriangularArray(self.num_rows)\r\n for i in range(self.num_rows):\r\n for j in range(self.num_rows):\r\n # Calculate the [i, j] entry.\r\n 
total = 0\r\n for k in range(j, i + 1):\r\n total += self[(i, k)] * other[(k, j)]\r\n result[(i, j)] = total\r\n return result", "def dot_product(A, B):\n # Section 1: Ensure A and B dimensions are the same\n rowsA = len(A); colsA = len(A[0])\n rowsB = len(B); colsB = len(B[0])\n if rowsA != rowsB or colsA != colsB:\n raise ArithmeticError('Matrices are NOT the same size.')\n\n # Section 2: Sum the products \n total = 0\n for i in range(rowsA):\n for j in range(colsB):\n total += A[i][j] * B[i][j]\n\n return total", "def distance(f1, f2):\n\n return np.sum((np.sum([f1, -f2], axis=0))**2, axis=1)", "def divide(m1,m2):\n \n if ((m1.shape[0] % 2 == 0) or (m1.shape[0] == 1)):\n n = m1.shape[0]\n else:\n n = m1.shape[0] + 1\n result = np.zeros((n, n), dtype = int)\n \n if (n == 1):\n result[0][0] = m1[0][0] * m2[0][0]\n else:\n new = n//2\n \n a11, a12, a21, a22 = m1[:new, :new], m1[new:, :new], m1[:new, new:], m1[new:, new:]\n b11, b12, b21, b22 = m2[:new, :new], m2[new:, :new], m2[:new, new:], m2[new:, new:]\n \n result[:new, :new] = divide(a11,b11) + divide(a12,b21)\n result[new:, :new] = divide(a11,b12) + divide(a12,b22)\n result[:new, new:] = divide(a21,b11) + divide(a22,b21)\n result[new:, new:] = divide(a21,b12) + divide(a22,b22)\n \n return result", "def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);", "def matrix_product(mat1: List[List[int]], mat2: List[List[int]]):\n if len(mat1) == 0 or len(mat2) == 0:\n raise ValueError(\"One of matrix is empty\")\n n, k1 = len(mat1), len(mat1[0])\n k2, m = len(mat2), len(mat2[0])\n if k1 != k2:\n raise ValueError(\n f\"Can't multiply two matrices with shapes {n}x{k1} and {k2}x{m}\"\n )\n mat2_t = matrix_transpose(mat2)\n return [[vec_product(vec1, vec2) for vec2 in mat2_t] for vec1 in mat1]", "def prod(self, x, y):\n return (self.basic_operation.reduce(x.original+y.original),\n self.operation1.prod(x.left, y.left),\n self.operation2.prod(x.right, y.right))", "def tile_calculation(xi, yi, axi, ayi, positions, weights):\n for j in range(cuda.blockDim.x):\n xj = positions[j,0]\n yj = positions[j,1]\n wj = weights[j]\n axi, ayi = body_body_interaction(xi, yi, xj, yj, wj, axi, ayi)\n return axi, ayi", "def dot(pepx1, pepx2):\n\n Ls = pepx1.shape\n assert pepx2.shape==Ls, '[dot]: sizes of pepx1 and pepx2 are not equal'\n new_pepx = np.empty(Ls, dtype=np.object)\n new_lams = np.empty(pepx1.lambdas.shape, dtype=np.object)\n\n # if np.all([ pepx1[i].ndim==3 and pepx2[i].ndim==3 for i in np.ndenumerate(pepx1) ]):\n # return peps_dot(pepx1,pepx2)\n # else:\n for idx in np.ndindex(Ls):\n len_dp1 = len(pepx1.phys_bonds[idx])\n len_dp2 = len(pepx2.phys_bonds[idx])\n ax1 = [0,2,4,6] + range(8, 8+len_dp1)\n ax2 = [1,3,5,7] + range(8+len_dp1-1,8+len_dp1+len_dp2-1)\n ax2[-len_dp2] = ax1[-1] # contract vertical bonds (mpx1 down with mpx2 up)\n new_site = np.einsum(pepx1[idx],ax1,pepx2[idx],ax2)\n new_pepx[idx] = tf.reshape(new_site,'ii,ii,ii,ii,...',group_ellipsis=False)\n\n i,j = idx\n for xx in range(new_lams.shape[2]):\n new_lams[i,j,xx] = np.outer(pepx1.lambdas[i,j,xx], pepx2.lambdas[i,j,xx]).reshape(-1)\n # print new_lams[i,j,xx].shape\n\n return PEPX_GL(new_pepx,new_lams) 
#,pepx1.phys_bonds)", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def pairwise_dot_product_similarity(x, y):\n return torch.mm(x, torch.transpose(y, 1, 0))", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def classical(m1,m2):\n \n n = m1.shape\n result = np.zeros(n, dtype = int)\n\n for i in range(n[0]):\n for j in range(n[0]):\n for k in range(n[0]):\n result[i][j] += m1[i][k] * m2[k][j]\n return result", "def dot_product(a, b):\n a1, a2, a3 = a\n b1, b2, b3 = b\n return a1 * b1 + a2 * b2 + a3 * b3", "def _product(self, args):\n pools = map(tuple, args) #within original version args defined as *args\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n return result", "def linear_interpolation(atoms1, atoms2, N):\n \n pos1 = atoms1.positions\n pos2 = atoms2.positions\n images = [atoms1]\n for n in range(N):\n nn = n + 1\n atoms_tmp = atoms1.copy()\n atoms_tmp.positions = ( (N+1-nn)*pos1 + nn*pos2 ) / (N+1)\n images += [atoms_tmp]\n images += [atoms2]\n return images", "def euclidean_distance_2d(XA: np.ndarray, XB: np.ndarray):\n out = np.empty((XA.shape[0], XB.shape[0]), dtype=XA.dtype)\n for i in numba.prange(XA.shape[0]):\n for j in range(XB.shape[0]):\n out[i, j] = np.sqrt((XA[i, 0] - XB[j, 0]) ** 2 + (XA[i, 1] - XB[j, 1]) ** 2)\n return out", "def sqeuclidean_distance_2d(XA: np.ndarray, XB: np.ndarray):\n out = np.empty((XA.shape[0], XB.shape[0]), dtype=XA.dtype)\n for i in numba.prange(XA.shape[0]):\n for j in range(XB.shape[0]):\n out[i, j] = (XA[i, 0] - XB[j, 0]) ** 2 + (XA[i, 1] - XB[j, 1]) ** 2\n return out", "def __surface_distances(result, reference, voxelspacing=None, connectivity=1):\n result = np.atleast_1d(result.astype(np.bool))\n reference = np.atleast_1d(reference.astype(np.bool))\n if voxelspacing is not None:\n voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)\n voxelspacing = np.asarray(voxelspacing, dtype=np.float64)\n if not voxelspacing.flags.contiguous:\n voxelspacing = voxelspacing.copy()\n \n # binary structure\n footprint = generate_binary_structure(result.ndim, connectivity)\n \n # test for emptiness\n if 0 == np.count_nonzero(result): \n raise RuntimeError('The first supplied array does not contain any binary object.')\n if 0 == np.count_nonzero(reference): \n raise RuntimeError('The second supplied array does not contain any binary object.') \n \n # extract only 1-pixel border line of objects\n result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)\n\n reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)\n\n dt = distance_transform_edt(~reference_border, sampling=voxelspacing)\n # print(dt)\n reference_border = (reference_border + 0).astype(np.float32)\n\n sds = dt[result_border]\n \n return sds", "def _tensor_batch_dot(t1: Tensor, t2: Tensor) -> Tensor:\n\n msg = (\n \"Please ensure each batch member has the same feature dimension. 
\"\n f\"First input has {torch.numel(t1) / t1.shape[0]} features, and \"\n f\"second input has {torch.numel(t2) / t2.shape[0]} features.\"\n )\n assert torch.numel(t1) / t1.shape[0] == torch.numel(t2) / t2.shape[0], msg\n\n return torch.mm(\n t1.view(t1.shape[0], -1),\n t2.view(t2.shape[0], -1).T,\n )", "def __mul__(self,other):\n # \n # 注意矩阵的A的列 与 相乘矩阵B的行必须相等,才能进行运算\n height = 0\n width = 0\n if isinstance(other, list): # 判断other是否是矩阵,即list形式的矩阵\n height = len(other)\n width = len(other[0])\n else:\n # 如果是对象,则直接获取行列值\n height = other.h\n width = other.w\n\n\n my_mul = zeroes(self.h, self.w)\n if self.w == height: # 两个矩阵的行列值需要相等 才能相乘\n for i in range(self.h):\n for j in range(width):\n my_sum = 0\n for k in range(height):\n if isinstance(other, list):\n my_sum += self.g[i][k] * other[k][j]\n # 通过3个循环变量取所有矩阵的行列值\n else:\n my_sum += self.g[i][k] * other.g[k][j]\n my_mul[i][j] = my_sum\n return my_mul \n else:\n return NotImplementedError", "def __mul__ (self, other):\n return perm(*(self._getcycles() + other._getcycles()))", "def product(self, x, y):\n return self._cached_product(x.value, y.value)", "def distance(brd1,brd2):\n\n step=brd1[1,0]-brd1[0,0]\n return np.sum(np.abs(brd1[:,1]-brd2[:,1]))*step", "def map_feature(X1, X2):\n degree = 6\n out = np.ones(X1.shape)\n \n for i in range(1, degree + 1):\n for j in range(0, i + 1):\n out = np.concatenate((out, np.power(X1, (i - j)) * np.power(X2, j)), axis=-1)\n\n return out" ]
[ "0.773317", "0.5649336", "0.5526998", "0.54341394", "0.53960305", "0.5378274", "0.5336186", "0.5309336", "0.529776", "0.5289525", "0.5283448", "0.52754015", "0.52056116", "0.5135578", "0.50863296", "0.5050334", "0.5024294", "0.49954486", "0.4992397", "0.49735647", "0.49717188", "0.4961927", "0.4959781", "0.4940064", "0.49399367", "0.4932615", "0.49212724", "0.49055746", "0.48976764", "0.48897642", "0.4866107", "0.4843088", "0.48255998", "0.48251253", "0.4819184", "0.4816141", "0.4810712", "0.4809668", "0.48034745", "0.47891194", "0.47860396", "0.47842", "0.47838998", "0.47635022", "0.47585952", "0.4753178", "0.475048", "0.4735247", "0.47303593", "0.4726892", "0.47224283", "0.47199464", "0.47169435", "0.4713793", "0.47126818", "0.46998528", "0.46976426", "0.46967214", "0.46959233", "0.4695676", "0.46955204", "0.46913534", "0.4683207", "0.46802652", "0.46719408", "0.4671064", "0.46693748", "0.46641195", "0.46614587", "0.46605617", "0.46574455", "0.4656189", "0.46512765", "0.46493345", "0.46483332", "0.46456802", "0.4631644", "0.46263385", "0.46255046", "0.46240968", "0.46236375", "0.46200195", "0.46048486", "0.4601232", "0.45925498", "0.45832348", "0.45785397", "0.45778146", "0.4576788", "0.45632055", "0.45613247", "0.4555602", "0.45501024", "0.45486438", "0.45476693", "0.45455432", "0.4541269", "0.45396024", "0.45381728", "0.45372927" ]
0.87116134
0
This function extracts the squared l2 norm from the inner product matrix.
def l2_square_from_inner_product(matrix):
    return np.diag(matrix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inner_product_to_normalized_L2_square(matrix):\n\n length = matrix.shape[0]\n norm = np.divide(1, np.sqrt(l2_square_from_inner_product(matrix)))\n\n normalized_inner_product = np.multiply(np.multiply(np.reshape(norm, [length, 1]), matrix),\n np.reshape(norm, [1, length]))\n return 2 - 2 * normalized_inner_product", "def inner_product_to_L2_square(matrix):\n\n length = matrix.shape[0]\n squared_norm = np.reshape(np.diag(matrix), (length, 1))\n\n return squared_norm + np.transpose(squared_norm) - 2 * matrix", "def L2norm(m):\n return np.sqrt(np.sum(m**2))", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def norm_L2(u):\n return norm_l2(u)/sqrt(float(u.size))", "def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flattened, flattened)", "def l2(vec):\n return np.linalg.norm(vec)", "def norm_l2(v):\n return np.sqrt((v**2).sum())", "def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5", "def norm_l2(u):\n return linalg.norm(u.ravel())", "def l2_norm(v):\n res = 0\n for e in v:\n res += e * e\n return math.sqrt(res)", "def _l2_norm_squared(self, z, theta):\n norms = np.zeros(shape=(len(z), self.n_states))\n\n for j in range(self.n_states):\n diff = theta[:, j] - z # ndarray of shape (n_samples, n_states) with differences\n norms[:, j] = np.square(np.linalg.norm(diff, axis=1)) # squared state conditional l2 norms\n\n return norms # squared l2 norm.", "def squared_norm(self) -> float:\n return self.__real**2 + self.__img[0]**2 + self.__img[1]**2 + self.__img[2]**2", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)", "def norm_with_l2(original_mat):\n normed_mat = np.zeros(original_mat.shape, dtype=np.float32)\n if len(original_mat.shape) == 2:\n for ind_r in range(original_mat.shape[0]):\n a = np.square(original_mat[ind_r]*1.0)\n b = np.sum(a)\n c = np.sqrt(b)\n normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / c\n # normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / np.sqrt(np.sum(np.square(original_mat[ind_r])*1.0))\n return normed_mat", "def calculate_error_l2_norm(self, dY):\n solutions = []\n norm = 0.\n for mi in range(len(self._meshes)):\n for ei in range(len(self._meshes[mi].elements)):\n e = self._meshes[mi].elements[ei]\n # change this to gauss points:\n x_vals, w = p_roots(20)\n norm_e_squared = 0.\n for i, x in enumerate(x_vals):\n norm_e_squared += w[i] * \\\n self.get_sol_value(mi, ei, dY, x,\n count_lift=False)**2\n norm_e_squared *= e.jacobian\n norm += norm_e_squared\n return sqrt(norm)", "def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2", "def norm2d(self) -> float:\n\n return self.v2ddict.norm2d()", "def l2_norm(vec_or_matrix):\n if len(vec_or_matrix.shape) == 1:\n # linear vector\n return vec_or_matrix / np.linalg.norm(vec_or_matrix)\n elif len(vec_or_matrix.shape) == 2:\n return vec_or_matrix / np.linalg.norm(vec_or_matrix, axis=1, ord=2)[:, np.newaxis]\n else:\n raise ValueError('Wrong number of dimensions, 1 or 2 is supported, not %i.' 
% len(vec_or_matrix.shape))", "def l2_norm(pattern):\n return np.linalg.norm(pattern)", "def l2(v, axis=None):\n length = v.shape[0]\n return np.sqrt(np.sum(np.square(v), axis=axis) / length)", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def _l2s(self, params):\n return [np.linalg.norm(param) for param in params]", "def norm_sqr(x):\n return inner_prod(x, x)[0]", "def compute_L2_normalization(xx):\r\n\treturn np.sum(xx ** 2, axis=1)", "def test_scale_features_L2_norm(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.0304526, 0.409996], [-0.999536, 0.816936], [-0.000485946, 0.40561]])\n\n # perform L2 normalization and check answer\n cdata.scale_features('L2 norm')\n self.assertTrue(allclose(cdata.data, answer))", "def norm2(point):\n return np.sum(point**2, -1)", "def l2_norm(self, input):\n input_size = input.size()\n buffer = torch.pow(input, 2)\n normp = torch.sum(buffer, 1).add_(1e-10)\n norm = torch.sqrt(normp)\n _output = torch.div(input, norm.view(-1, 1).expand_as(input))\n output = _output.view(input_size)\n\n return output", "def sigma_norm2( self):\n return self._sigma2", "def _l2_normalize(x, axis=None, eps=1e-12):\n return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)", "def L2norm(self, array):\n norm = torch.sqrt(torch.sum(array * array))\n return norm", "def norm2(self):\n return getattr(self, self.norm2_name)", "def norm(x):\n return inner_prod(x, x)[0].sqrt_()", "def squared_norm(self, vector, base_point=None):\n sq_norm = self.inner_product(vector, vector, base_point)\n return gs.real(sq_norm)", "def two_norm(v):\n return math.sqrt(dot_product(v, v))", "def l2norm(array1,array2):\r\n tot = np.sum(np.abs(array1)**2)\r\n return np.sqrt(np.sum(np.abs(array1-array2)**2)/tot)", "def norm(self):\n\t\treturn math.sqrt(self.norm2())", "def norm(self):\n mag_squared = self._sum_of_squares()\n return sqrt(mag_squared)", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def fast_2_norm(A):\n v = np.random.rand(A.shape[1], 1)\n return la.norm(A.dot(v))", "def norm2_r(self, a: np.ndarray) -> float:\n return a.T @ a", "def l2norm(X):\n norm = np.linalg.norm(X, axis=1, keepdims=True)\n return 1.0 * X / norm", "def squared_norm(self, vector, base_point=None):\n sq_norm = self.embedding_metric.squared_norm(vector)\n return sq_norm", "def square_norm(x):\n return np.linalg.norm(x) ** 2", "def normSq(self):\n\t\treturn self.x*self.x+self.y*self.y", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def l2norm_(X, Xstar):\n return cdist(X, Xstar)", "def normsq(self):\n return sum(x**2 for x in self.data)", "def norm2d(w_in):\n return nn.BatchNorm2d(num_features=w_in, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)", "def L2_normalize(xx):\r\n\tZx = compute_L2_normalization(xx)\r\n\treturn xx / np.sqrt(Zx[:, np.newaxis])", "def l2_normalize(data, axis=-1, eps=1e-6):\n ret = data / (np.linalg.norm(data, axis=axis, keepdims=True) + eps)\n return ret", "def norm(self):\n\t\treturn np.sqrt(self.normSq())", "def vec_2norm (x):\n return math.sqrt (sum ([x_i**2 for x_i in x]))", "def normr(Mat):\n B = normalize(Mat, norm='l2', axis=1)\n return B", "def norm(self):\n return sqrt(self.dot(self))", "def norm(self):\n return math.sqrt(self.dotProduct(self))", "def squared_norm(self, vector, base_point=None):\n args = {\n 
\"vector\": vector,\n \"base_point\": base_point,\n }\n sq_norms = self._iterate_over_factors(\"squared_norm\", args)\n return sum(sq_norms)", "def norm(x):\n return np.sqrt(norm2(x))", "def get_sqrt_2():\n return 1.41421356", "def l2(x1, x2):\n return np.sqrt((x1 - x2)**2)", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)", "def squaredDistance(vec1, vec2):\n return (distance.euclidean(vec1, vec2))**2", "def normalize_l2norm(data,tol=0):\n data_sqrt=np.sqrt(np.square(data).sum(axis=1))\n data_sqrt.shape=(data_sqrt.shape[0],1)\n #tol=0#1e-8\n data=data/(data_sqrt+tol)\n return data", "def L2_dists(x, y):\n #print(x.shape)\n #print(y.shape)\n dists = -2 * np.matmul(x, y.T)\n dists += np.sum(x**2)[np.newaxis]\n dists += np.sum(y**2)\n return np.sqrt(dists)", "def L2_func(x):\n return K.expand_dims(K.sqrt(K.sum(K.pow(x,2), axis=1)))", "def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)", "def get_L(self, X):\n if issparse(X):\n return slinalg.norm(X, axis=1) ** 2\n else:\n return norm(X, axis=1) ** 2", "def getNorm(self, norm=lambda l: (sum(map(lambda x: x ** 2, l))) ** (1 / 2)):\n return norm(self.components)", "def getNormLaplacian(W):\n\td=[np.sum(row) for row in W]\n\tD=np.diag(d)\n\tL=D-W\n\t#Dn=D^(-1/2)\n\tDn=np.power(np.linalg.matrix_power(D,-1),0.5)\n\tLbar=np.dot(np.dot(Dn,L),Dn)\n\treturn Lbar", "def project_L2(w, l):\n return w * min(1, 1 / (l ** (1 / 2.0) * np.linalg.norm(w, 2)))", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)", "def getL2Error(self,exactSolution):\n value = 0\n error = np.array(self.solution)-np.array([exactSolution(x) for x in self.triangulation.points])\n for ele,triPoints in enumerate(self.triangulation.simplices):\n transformMatrix,translateVector = self.calculateTransform(ele)\n determinant = abs(np.linalg.det(transformMatrix))\n #Last vector is the precalculated integral of the basisfunctions over a reference element\n value+=determinant*np.dot(error[triPoints]**2,np.array([1/6.,1/3.,1/3.]))\n return(math.sqrt(value))", "def l21_norm_bound(X, Y, loss='square'):\n if loss.lower() == 'square':\n # In this case max_tau := 2/n * max(||[X^T * Y]s||_2)\n # First compute the 2-norm of each row of X^T * Y\n norm2 = map(lambda x: np.linalg.norm(x, ord=2), X.T.dot(Y))\n return np.max(norm2) * (2.0/X.shape[0])\n else:\n print('Only square loss implemented so far.')\n sys.exit(-1)", "def norm(self):\n C = np.prod([F.T @ F for F in self.factors], axis=0)\n return np.sqrt(np.sum(C))", "def calculate_dist_mat_2(A: np.ndarray, B: np.array, norm: int) -> np.ndarray:\n kwargs = {'p': norm}\n dist_mat = cdist(A, B, metric='minkowski', **kwargs)\n return dist_mat", "def P2l_rec_norm(ells, cost):\n P22 = 3. * (1. - cost**2)\n P23 = 15. * cost * (1. - cost**2)\n P2l = np.zeros(len(ells))\n P2l[0] = 0.\n P2l[1] = 0.\n P2l[2] = P22\n P2l[3] = P23\n P2l_norm = np.copy(P2l)\n P2l_norm[2] *= P2l_norm_prefac(2)\n P2l_norm[3] *= P2l_norm_prefac(3)\n for ell in ells[4:]:\n # print ell, P2l[ell-1], P2l[ell-2]\n a = np.sqrt((4 * ell**2 - 1.) 
/ (ell**2 - 4))\n b = cost * P2l_norm[ell - 1]\n c = np.sqrt(((ell - 1.)**2 - 4) /\n (4 * (ell - 1.)**2 - 1)) * P2l_norm[ell - 2]\n # print a,b,c\n P2l_norm[ell] = a * (b - c)\n # print ell, P2l_norm[ell], P2l_norm_prefac(ell)\n P2l[ell] = P2l_norm[ell] / P2l_norm_prefac(ell)\n return P2l", "def get_norm(self, l):\n return self._W.norm(l)", "def get_norm(self, l):\n return self._W.norm(l)", "def norm(point):\n return np.sqrt(norm2(point))", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def l2norm(X):\n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n X = torch.div(X, norm)\n return X", "def sqnorm(v):\n res = 0\n for elt in v:\n for coef in elt:\n res += coef ** 2\n return res", "def l2norm(X): \n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n a = norm.expand_as(X) + 1e-8\n X = torch.div(X, a) \n return X", "def L2_dists_vectorized(x, y):\n dists = -2 * np.matmul(x, y.T)\n dists += np.sum(x**2, axis=1)[:, np.newaxis]\n dists += np.sum(y**2, axis=1)\n return np.sqrt(dists)", "def norm(self):\n norm = self.scalarProduct(self) ** 0.5\n return norm", "def normalize_l2(x):\n return x / (npla.norm(x))", "def l2norm(X, dim=-1, eps=1e-8):\r\n norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps\r\n X = torch.div(X, norm)\r\n return X", "def normsq(self):\n return abs(sum(self._ar * self._ar))", "def dist_squared(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n return (self - vec2) * (self - vec2)", "def norm(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n if self._dtype == complex:\n def __map(m):\n return m[2].real ** 2 + m[2].imag ** 2\n else:\n def __map(m):\n return m[2] ** 2\n\n n = self._data.map(\n __map\n ).reduce(\n lambda a, b: a + b\n )\n\n return math.sqrt(n)", "def squared_norm(self, x_tensors=None):\n if x_tensors is None:\n x_tensors = self.x_tensors()\n\n return numpy.sum([squared_L2_norm(t) for t in x_tensors])", "def matrix_L2(l, omega, S, cn):\n zt = omega * S / cn['t']\n L = np.array(wN2(l, zt), wN4(l, zt))\n return L.T", "def l2p(x, y):\n \n return square_size[0]*x, square_size[1]*y", "def norm(alpha, F):\n return inner_product(alpha, F, alpha)", "def norm(self) -> float:\n return self.squared_norm()**0.5", "def norm(self):\n return np.sqrt(np.dot(self._data, self._data))", "def l2norm_1d(new, old):\n\tdiff = 0\n\tnx = len(new)\n\tfor i, n in enumerate(new):\n\t\tif n:\n\t\t\tdiff += ((n - old[i])/n)**2\n\tnorm = scipy.sqrt(diff/nx)\n\treturn norm", "def l2_distance(v1, v2):\n\treturn np.linalg.norm(np.array(v1) - np.array(v2))" ]
[ "0.8038655", "0.79935986", "0.7481027", "0.7333756", "0.71862626", "0.7168722", "0.71422154", "0.71272767", "0.7005889", "0.6986283", "0.6936558", "0.689807", "0.6859919", "0.68299115", "0.6818152", "0.678622", "0.67799747", "0.6730021", "0.67066455", "0.6701476", "0.6701306", "0.6688216", "0.66403055", "0.6579155", "0.6573765", "0.6558734", "0.6543347", "0.6512711", "0.6489012", "0.64723676", "0.64642733", "0.64614713", "0.6459877", "0.6446045", "0.6443651", "0.63937837", "0.6317596", "0.628092", "0.6263609", "0.6259707", "0.6255387", "0.6230586", "0.6226179", "0.6217083", "0.62118757", "0.6187874", "0.6169813", "0.61610216", "0.6136922", "0.6123859", "0.609898", "0.6096916", "0.6053245", "0.60363936", "0.60154194", "0.601107", "0.59963846", "0.59841037", "0.5964484", "0.59570616", "0.5944093", "0.5933876", "0.5932054", "0.5931658", "0.5924176", "0.5907942", "0.59000313", "0.5875353", "0.5841822", "0.5826781", "0.58147866", "0.58085686", "0.58085084", "0.5793662", "0.578605", "0.5772651", "0.5759394", "0.57579535", "0.57579535", "0.57414836", "0.5711277", "0.5711277", "0.5711277", "0.5704038", "0.5691994", "0.5691083", "0.5689474", "0.5684701", "0.56827873", "0.568219", "0.5681298", "0.5679438", "0.5676075", "0.56741947", "0.5667119", "0.5660092", "0.5649604", "0.56457275", "0.5636244", "0.56249326" ]
0.7382337
3
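For readers skimming the record above, here is a minimal usage sketch of the retrieved function with assumed example data (the patterns array, its shape, and the variable names are illustrative only and are not part of the dataset record). It shows that the diagonal of a Gram matrix built from flattened patterns is exactly the per-pattern squared L2 norm.

import numpy as np

def l2_square_from_inner_product(matrix):
    return np.diag(matrix)

patterns = np.random.rand(4, 6)        # 4 hypothetical flattened patterns
gram = patterns @ patterns.T           # inner product (Gram) matrix, shape (4, 4)

squared_norms = l2_square_from_inner_product(gram)
# The diagonal entries equal each pattern's squared L2 norm.
assert np.allclose(squared_norms, np.sum(patterns ** 2, axis=1))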
Turns the inner product matrix into the |pattern1 - pattern2|_{2}^2 matrix. The matrix has to be a square matrix.
def inner_product_to_L2_square(matrix):
    length = matrix.shape[0]
    squared_norm = np.reshape(np.diag(matrix), (length, 1))
    return squared_norm + np.transpose(squared_norm) - 2 * matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inner_product(pattern_one, pattern_two):\n\n return np.sum(np.multiply(pattern_one, pattern_two))", "def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder", "def l2_square_from_inner_product(matrix):\n return np.diag(matrix)", "def matrix_subs(matrix_2x2, point):\n arr = []\n for el in matrix_2x2:\n arr.append(el.subs(x1, point[0]).subs(x2, point[1]))\n \n M = Matrix([[arr[0], arr[1]], [arr[2], arr[3]]])\n\n return M", "def matrix_mult(m1, m2):\n\ttemp = []\n\tfor i in range(len(m1)):\n\t\te = []\n\t\tfor j in range(len(m2[0])):\n\t\t\te.append(row_times_column(m1,i,m2,j))\n\t\ttemp.append(e)\n\treturn temp", "def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]", "def inner_product_to_normalized_L2_square(matrix):\n\n length = matrix.shape[0]\n norm = np.divide(1, np.sqrt(l2_square_from_inner_product(matrix)))\n\n normalized_inner_product = np.multiply(np.multiply(np.reshape(norm, [length, 1]), matrix),\n np.reshape(norm, [1, length]))\n return 2 - 2 * normalized_inner_product", "def mat_mul(mat1, mat2):\n\n rows1 = len(mat1)\n cols1 = len(mat1[0])\n rows2 = len(mat2)\n cols2 = len(mat2[0])\n\n if cols1 != rows2:\n return None\n else:\n new_matrix = []\n for x in range(rows1):\n aux_row = []\n for y in range(cols2):\n aux_sum = []\n for z in range(cols1):\n aux_sum.append(mat1[x][z] * mat2[z][y])\n aux_row.append(sum(aux_sum))\n new_matrix.append(aux_row)\n\n return new_matrix", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def Mxform(x1,y1,x2,y2):\n return Jones.toMueller([[np.dot(x2,x1), np.dot(x2, y1)], [np.dot(y2,x1), np.dot(y2,y1)]])", "def matrix_mult(m1, m2):\n pass", "def rotate2D(self, matrix) -> None:\n N = len(matrix)\n\n # In case of N is odd, the innermost square belt is just one cell, no need of rotating.\n for i in range(0,int(N/2)): # outer loop for each square belt\t\t\t\n for j in range(i,N-i-1): # N-i group in the i-th square belt\n #print(i,j)\n tmp = 
matrix[i][j]\n matrix[i][j] = matrix[N-j-1][i]\n matrix[N-j-1][i] = matrix[N-i-1][N-j-1]\n matrix[N-i-1][N-j-1] = matrix[j][N-i-1]\n matrix[j][N-i-1] = tmp\n #print(matrix)", "def matrixMul(self, matrix, matrix2):\n matrix0 = matrix[:]\n matrix[0] = matrix0[0] * matrix2[0] + matrix0[2]*matrix2[1] # + matrix0[4]*0\n matrix[1] = matrix0[1] * matrix2[0] + matrix0[3]*matrix2[1] # + matrix0[5]*0\n matrix[2] = matrix0[0] * matrix2[2] + matrix0[2]*matrix2[3] # + matrix0[4]*0\n matrix[3] = matrix0[1] * matrix2[2] + matrix0[3]*matrix2[3] # + matrix0[5]*0\n matrix[4] = matrix0[0] * matrix2[4] + matrix0[2]*matrix2[5] + matrix0[4]\n matrix[5] = matrix0[1] * matrix2[4] + matrix0[3]*matrix2[5] + matrix0[5]", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def form_square_block_matrix(mat1,mat2):\n if mat1.cols==1:\n mat3 = mp.matrix(mat1.rows+mat2.rows,1)\n mat3[:mat1.rows] = mat1[:]\n mat3[mat1.rows:mat3.rows] = mat2[:]\n else:\n mat3 = mp.matrix(mat1.rows+mat2.rows, mat1.rows+mat2.rows)\n mat3[:mat1.rows,:mat1.rows] = mat1[:,:]\n mat3[mat1.rows:mat3.rows,mat1.rows:mat3.rows] = mat2[:,:]\n return mat3", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def mul(self,mat1,mat2):\n if(isinstance(mat2,int)==True):\n result = [[mat1[i][j] * mat2 for j in range(len(mat1[0]))] for i in range(len(mat1))]\n self.out = result\n return self.out\n elif(len(mat1[0])==len(mat2)):\n result = [[sum(a*b for a,b in zip(i,j)) for j in zip(*mat2)] for i in mat1]\n self.out = result\n return self.out", "def matrix_product(mat1: List[List[int]], mat2: List[List[int]]):\n if len(mat1) == 0 or len(mat2) == 0:\n raise ValueError(\"One of matrix is empty\")\n n, k1 = len(mat1), len(mat1[0])\n k2, m = len(mat2), len(mat2[0])\n if k1 != k2:\n raise ValueError(\n f\"Can't multiply two matrices with shapes {n}x{k1} and {k2}x{m}\"\n )\n mat2_t = matrix_transpose(mat2)\n return [[vec_product(vec1, vec2) for vec2 in mat2_t] for vec1 in mat1]", "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def mmultiply(self, matrix):\n try:\n result_matrix = [[0 for row in range(len(self.matrix))] for col in range(len(matrix[0]))]\n for i in range(len(self.matrix)):\n for j in range(len(matrix[0])):\n for k in range(len(matrix)):\n result_matrix[i][j] += self.matrix[i][k] * matrix[k][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass", "def StrassenMatrixM(a, b):\r\n if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:\r\n raise Exception('Matrices should be 2x2!')\r\n print(a[0][0] * b[0][1] + a[0][1] * b[1][1])\r\n matrix = [[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],\r\n [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]]]\r\n\r\n return matrix", "def matrix_mult_matrix(matrix_a, matrix_b):\n m = len(matrix_a)\n n = len(matrix_b)\n result = []\n matrix_b_t = transpose_matrix(matrix_b)\n for i in xrange(m):\n row = []\n\tfor j in xrange(m):\n row.append(dot_product(matrix_a[i], matrix_b_t[j]))\n\tresult.append(row)\n return result", "def __mul__(self,m):\n if type(m) != Matrix:\n raise TypeError('The second argument is not a matrix lol')\n if self.ncols != m.nrows:\n raise ValueError('matrix dot argument has incorrect number of rows')\n new = Matrix(self.nrows,m.ncols)\n columns 
= m.getCols()\n rowindex = 0\n colindex = 0 \n for row in self.matrix:\n colindex = 0 \n for col in columns:\n summ = 0\n for i,j in zip(row,col):\n summ+= i*j \n new.matrix[rowindex][colindex] = summ\n print new.matrix\n colindex += 1 \n rowindex+=1\n return new", "def matrix_multiplication_loop(x_matrix, y_matrix):\n result = []\n for i, row in enumerate(x_matrix):\n row_vector = []\n for j in range(len(y_matrix[0])):\n product = 0\n for k in range(len(row)):\n product += x_matrix[i][k] * y_matrix[k][j]\n row_vector.append(product)\n result.append(row_vector)\n return result", "def prod_mat(self,other):\n [rs,cs],[ro,co] = self.D,other.D\n assert cs == ro, \"tailles incompatibles\"\n return Mat([rs,co], lambda i,j : prod_scal(self.ligne(i),other.col(j)))", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def mat_mul(mat1, mat2):\n\n if len(mat1[0]) == len(mat2):\n\n mat2 = matrix_transpose(mat2)\n response = []\n\n for row in range(len(mat1)):\n response.append(\n [\n sum(dot_product(mat1[row], mat2[column]))\n for column in range(len(mat2))\n ]\n )\n\n return response\n\n else:\n return None", "def multiply_by_left_matrix(matrix, img):\n first = np.inner(matrix[0], img)\n second = np.inner(matrix[1], img)\n third = np.inner(matrix[2], img)\n\n res = np.dstack((first, second, third))\n return res", "def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);", "def square_matrix_multiply(a, b):\n n = len(a)\n c = [[0]*n for _ in range(n)]\n for i in range(n):\n for j in range(n):\n sm = 0\n for k in range(n):\n sm += (a[i][k] * b[k][j])\n c[i][j] = sm\n\n return c", "def matrix_mult(A,B):\n\n m = len(A)\n p = len(B)\n n = len(B[0])\n AB = []\n for i in range(m):\n AB.append([])\n for j in range(n):\n total = 0\n for k in range(p):\n total += A[i][k] * B[k][j]\n AB[i].append(total)\n return AB", "def solve_2x2(matrix: FieldMatrix,\n rhs: Fields) -> OutputFields:\n _validate_matrix_shape(matrix, (2, 2))\n\n a, b = matrix[0]\n c, d = matrix[1]\n e, f = rhs\n\n inv_factor = det_2x2(matrix)\n\n return [\n tf.nest.map_structure(tf.math.divide_no_nan, det_2x2([\n [e, b],\n [f, d],\n ]), inv_factor),\n tf.nest.map_structure(tf.math.divide_no_nan, det_2x2([\n [a, e],\n [c, f],\n ]), inv_factor),\n ]", "def outer_product(A, B): \n print(A)\n print(B)\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n \n if A_columns == 1 and B_rows == 1:\n \n outer_product = []\n\n # multi-line list comprehension for outer product\n [outer_product.append([A[i][0] * B[0][j] for j in range(B_columns)]) \n for i in range(A_rows)]\n\n return outer_product\n\n else:\n print(\"dimensions of vector do not match.\")", "def matrix_mult(m1, m2):\n output = []\n for rowIndex, row in enumerate(m1): #go through rows in m1\n new_row = []\n for columnIndex in range(len(m2[0])): #go through indices for each column of m2\n sum = 0\n for index3 in range(len(row)):\n product = m1[rowIndex][index3] * 
m2[index3][columnIndex]\n sum += product\n new_row.append(sum)\n output.append(new_row)\n return output\n \n \n #output = []\n #first for loop corresponds to the rows of my output matrix and loops through the rows of m1 (enumerate)\n #create an empty new row\n # second for loop, loops through columns of m2\n # create sum variable, initialize it with zero\n # third for loop, multiplies the index of the row in m1 times the index of the column in m2\n # add sum to product and assign this to the sum variable\n # append sum to new row\n # append new row to output\n # return output", "def transformation_2d(vertices, kernels=KERNELS):\n\t# calculate the transpose matrix of vertices\n\ttranspose = vertices.transpose()\n\t# insert a row of ones in the transpose matrix's end, then insert the result in 'matrices' list\n\tkernels.append(np.append(transpose, [np.ones(len(transpose[0]))], axis=0))\n\t# multiply matrices into 'kernels' list,\n\t# remove the last row (of ones) and calculate the transpose matrix of the result\n\tfinal_transformation_result = np.delete(np.linalg.multi_dot(kernels), 2, 0).transpose()\n\tKERNELS.clear()\n\treturn final_transformation_result", "def matrix_apply_to_2d(data, matrix: np.matrix):\n from scipy import mgrid\n\n cx = data.shape[0] / 2\n cy = data.shape[1] / 2\n\n # Calculate the new coordinates of every point\n grid = mgrid[-cx:data.shape[0]-cx, -cy:data.shape[1]-cy]\n temp = grid.reshape((2, grid.size / 2))\n # Add the fourth dimension (just 1s but needed for the computations)\n newrow = np.ones(grid.size / 2)\n temp = np.vstack([temp, newrow])\n # Use the matrix to calculate the new positions of every point\n temp = np.dot(matrix, temp)\n # Delete the fourth dimension\n temp = np.delete(temp, 2, axis=0)\n temp = np.array(temp)\n grid = np.reshape(temp, (2, data.shape[0], data.shape[1]))\n\n grid[0] += cx\n grid[1] += cy\n\n from scipy.ndimage.interpolation import map_coordinates\n d = map_coordinates(data, grid, order=3)\n\n return d", "def cat_matrices2D(mat1, mat2, axis=0):\n res = []\n if axis == 1:\n if len(mat1) == len(mat2):\n for i in mat1:\n for j in mat2:\n res.append(i+[j[0]])\n mat2.remove(j)\n else:\n return None\n else:\n for i in mat2:\n if len(i) == len(mat1[0]):\n res.append(mat1 + [i])\n else:\n return None\n return (res)", "def add_matrices2D(mat1, mat2):\n if len(mat1) != len(mat2):\n return None\n if len(mat1[0]) != len(mat2[0]):\n return None\n return [[ele1 + ele2 for ele1, ele2 in zip(row1, row2)]\n for row1, row2 in zip(mat1, mat2)]", "def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), 
recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11", "def get_molecular_matrix(single_body, two_body):\n x, y = single_body.shape\n func = np.vectorize(round_custom)\n _new_dim = x * y\n single_one_dim = single_body.reshape(_new_dim, 1)\n two_body_two_dim = func(two_body.reshape(_new_dim, _new_dim))\n idx = 0\n x, _ = two_body_two_dim.shape\n while idx < x:\n two_body_two_dim[idx][idx] = round_custom(single_one_dim[idx][0])\n idx += 1\n return two_body_two_dim", "def strassen_dot(x, y):\n# X = (A B) Y = (E F)\n# (C D) (G H)\n# \n# A through H -- (n/2 x n/2) matrices\n# \n# let P1 = A(F-H), P2 = (A+B)H, P3 = (C+D)E, P4 = D(G-E), \n# \n# P5 = (A+D)(E+H), P6 = (B-D)(G+H), P7 = (A-C)(E+F)\n# \n# XY = (AE+BG AF+BH) = (P5+P4-P2+P6 P1+P2 )\n# (CE+DG CF+DH) (P3+P4 P1+P5-P3-P7)\n\n n = x.shape[0]\n originalN = n\n \n # base case (n<=8)\n if n <= 8:\n return brute_multiply(x, y)\n \n n_is_power_of_two = (n & (n-1) == 0)\n\n if not n_is_power_of_two:\n # count the closest power of 2 above n\n # and resize x and y matrices\n newN = upper_pow_2(n)\n \n n = newN\n \n nX = np.zeros( (n, n) )\n# mapping x on to nX:\n nX[:originalN, :originalN] = x\n x = nX\n \n nY = np.zeros( (n, n) )\n# mapping y on to nY:\n nY[:originalN, :originalN] = y\n y = nY\n \n \n a = x[ 0:n//2, 0:n//2 ]\n b = x[ 0:n//2, n//2: ]\n c = x[ n//2:, 0:n//2 ]\n d = x[ n//2:, n//2: ]\n\n e = y[ 0:n//2, 0:n//2 ]\n f = y[ 0:n//2, n//2: ]\n g = y[ n//2:, 0:n//2 ]\n h = y[ n//2:, n//2: ]\n \n# let P1 = A(F-H), P2 = (A+B)H, P3 = (C+D)E, P4 = D(G-E), \n# \n# P5 = (A+D)(E+H), P6 = (B-D)(G+H), P7 = (A-C)(E+F)\n \n # recursively compute 7 (SEVEN) products P1 .. 
P7\n p1 = strassen_dot(a, f-h)\n p2 = strassen_dot(a+b, h)\n p3 = strassen_dot(c+d, e)\n p4 = strassen_dot(d, g-e)\n p5 = strassen_dot(a+d, e+h)\n p6 = strassen_dot(b-d, g+h)\n p7 = strassen_dot(a-c, e+f)\n \n# do the necessary (clever) additions & substractions with P1..P7\n \n# XY = (AE+BG AF+BH) = (P5+P4-P2+P6 P1+P2 )\n# (CE+DG CF+DH) (P3+P4 P1+P5-P3-P7)\n res1 = np.hstack( (p5+p4-p2+p6, p1+p2) )\n res2 = np.hstack( (p3+p4, p1+p5-p3-p7) )\n res = np.vstack( (res1, res2) )\n\n return res[:originalN, :originalN]", "def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))", "def matrixMultiplication(firstMatrix, secondMatrix):\n if len(firstMatrix[0]) == len(secondMatrix): # Checks whether the matrices can be multiplied or not or not\n finalMatrix = []\n for y in range(len(firstMatrix)): # 2\n currentMatrix = []\n for i in range(len(secondMatrix[0])):\n currentSum = 0\n for j in range(len(secondMatrix)):\n currentSum += secondMatrix[j][i] * firstMatrix[y][j]\n currentMatrix.append(currentSum)\n print(\"This is my current matrix: \" + str(currentMatrix))\n finalMatrix.append(currentMatrix)\n print(\"This product of the two matrices is :) \" + str(finalMatrix))\n else:\n print(\"This operation cannot be done, make sure the rows of the first matrix is the same as the number of columns in the second matrix\")", "def _inner_product_d1(\n self, one_forms_a, one_forms_b, one_forms_bp, areas_bp, inv_surface_metrics_bp\n ):\n one_forms_bp_t = gs.transpose(one_forms_bp, (0, 2, 1))\n\n one_forms_a_t = gs.transpose(one_forms_a, (0, 1, 3, 2))\n xa = one_forms_a_t - one_forms_bp_t\n\n xa_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xa, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xa),\n )\n\n one_forms_b_t = gs.transpose(one_forms_b, (0, 1, 3, 2))\n xb = one_forms_b_t - one_forms_bp_t\n xb_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xb, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xb),\n )\n\n return self.d1 * gs.sum(\n gs.einsum(\n \"...bii->...b\",\n gs.matmul(\n xa_0,\n gs.matmul(\n inv_surface_metrics_bp, gs.transpose(xb_0, axes=(0, 1, 3, 2))\n ),\n ),\n )\n * areas_bp\n )", "def mult(m1, m2):\n assert np.shape(m1) == (2, 3)\n assert np.shape(m2) == (2, 3)\n\n m1_temp = np.vstack((m1, [0, 0, 1]))\n m2_temp = np.vstack((m2, [0, 0, 1]))\n result = m1_temp * m2_temp\n\n return result[:2, :]", "def pair_product(x1, x2):\n return np.multiply(x1, x2)", "def Multiply(M1,M2):\r\n M3=[]\r\n w=0\r\n while w<len(M2[0]):\r\n tap=[]\r\n t=0\r\n while t<len(M2):\r\n tap.append(M2[t][w])\r\n t=t+1\r\n M3.append(tap)\r\n w=w+1\r\n M=[]\r\n # Multiplying matrices\r\n k=0\r\n sums=0\r\n while k<len(M1):\r\n j=0\r\n mpy=[]\r\n while j<len(M3):\r\n p=0\r\n sums=0\r\n while p<len(M3[j]):\r\n temp = (M1[k][p])*(M3[j][p])\r\n sums=sums+temp\r\n p=p+1\r\n mpy.append(sums)\r\n j=j+1\r\n M.append(mpy)\r\n k=k+1\r\n return M", "def generate_pattern_grid(words1, words2):\n # Convert word lists to integer arrays\n w1, w2 = (\n np.array([[ord(c) for c in w] for w in words], dtype=np.uint8)\n for words in (words1, words2)\n )\n\n if len(w1) == 0 or len(w2) == 0:\n return np.zeros((len(w1), len(w2)), dtype=np.uint8)\n\n # equality_grid[a, b, i, j] represents whether the ith letter\n # of words1[a] equals the jth letter of words2[b]\n equality_grid = np.zeros((len(w1), len(w2), 5, 5), dtype=bool)\n for i, j in it.product(range(5), range(5)):\n equality_grid[:, :, i, j] = np.equal.outer(w1[:, 
i], w2[:, j])\n\n patterns = np.zeros((len(w1), len(w2)), dtype=np.uint8)\n three_pows = (3**np.arange(5)).astype(np.uint8)\n for i, tp in enumerate(three_pows):\n # This accounts for yellow squares\n patterns[:, :] += tp * equality_grid[:, :, i, :].any(2)\n # This accounts for green squares\n patterns[:, :] += tp * equality_grid[:, :, i, i]\n\n return patterns", "def complex_mul2d(a, b):\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def multiplicand_2(p):\n m2 = cddr(p) # (m2 m3 ...)\n rests = cdr(m2) # (m3...)\n if isNull(rests):\n return car(m2)\n else:\n restp = convertToPythonList(cdr(rests))\n return make_product_2(car(m2), car(rests), *restp)", "def strassen(m1, m2):\n \n if ((m1.shape[0] % 2 == 0) or (m1.shape[0] == 1)):\n n = m1.shape[0] \n else:\n n = m1.shape[0] + 1\n result = np.zeros((n, n), dtype = int)\n \n if (n == 1):\n result[0][0] = m1[0][0] * m2[0][0]\n else:\n new = n//2\n \n a11, a12, a21, a22 = m1[:new, :new], m1[new:, :new], m1[:new, new:], m1[new:, new:]\n b11, b12, b21, b22 = m2[:new, :new], m2[new:, :new], m2[:new, new:], m2[new:, new:]\n \n p1 = strassen(a11, b12 - b22)\n p2 = strassen(a11 + a12, b22)\n p3 = strassen(a21 + a22, b11)\n p4 = strassen(a22, b21 - b11)\n p5 = strassen(a11 + a22, b11 + b22)\n p6 = strassen(a12 - a22, b21 + b22)\n p7 = strassen(a11 - a21, b11 + b12)\n \n result[:new, :new] = p5 + p4 - p2 + p6\n result[new:, :new] = p1 + p2\n result[:new, new:] = p3 + p4 \n result[new:, new:] = p5 + p1 - p3 - p7\n \n return result", "def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))", "def cat_matrices2D(mat1, mat2, axis=0):\n mat11 = [[i for i in j] for j in mat1]\n mat22 = [[i for i in j] for j in mat2]\n if axis == 0:\n if len(mat1[0]) != len(mat2[0]):\n return\n return mat11 + mat22\n if axis == 1:\n if len(mat1) != len(mat2):\n return\n return [mat11[i] + mat22[i] for i in range(len(mat11))]", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def rotate_2d_matrix(matrix):\n\n L = len(matrix)\n for i in range(L // 2):\n for j in range(i, L - i - 1):\n temp = matrix[i][j]\n matrix[i][j] = matrix[L - 1 - j][i]\n matrix[L - 1 - j][i] = matrix[L - 1 - i][L - 1 - j]\n matrix[L - 1 - i][L - 1 - j] = matrix[j][L - 1 - i]\n matrix[j][L - 1 - i] = temp", "def inner_product_similarity(a: torch.Tensor, b: torch.Tensor, dim=1) -> torch.Tensor:\n outputs = (a * 
b).sum(dim=dim)\n return outputs", "def classical(m1,m2):\n \n n = m1.shape\n result = np.zeros(n, dtype = int)\n\n for i in range(n[0]):\n for j in range(n[0]):\n for k in range(n[0]):\n result[i][j] += m1[i][k] * m2[k][j]\n return result", "def to_matrix(self, system, second_system=None):\n\n if second_system is None:\n second_system = system\n\n return Matrix([i.dot(self).dot(j) for i in system for j in\n second_system]).reshape(3, 3)", "def matrix_as_psfm(matrix):\n return [[2**(ep-2) for ep in row] for row in matrix]", "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def symmetric_matrix_from_2vectors(v1=[1,1,1],v2=[0,0,0]):\n a,b,c=v1\n u,v,w=v2\n return np.complex128(np.matrix([[a,u,v],[u,b,w],[v,w,c]]))", "def partial_transpose(matrix):\n size = len(matrix)\n res_matrix = np.zeros((size,) * 4, dtype=complex)\n for p1 in range(size):\n for p2 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n res_matrix[p1, p2, p1_, p2_] = matrix[p1, p2_, p1_, p2]\n return res_matrix", "def mult_img_matrix(imgs, matrix):\n nimgs, ny, nx = imgs.shape\n\n vec = np.reshape(imgs, [nimgs, ny * nx])\n vec_out = matrix.dot(vec)\n imgs_out = np.reshape(vec_out, [nimgs, ny, nx])\n\n return imgs_out", "def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i", "def quadratic_expansion(matrix):\n arr = np.copy(matrix)\n arr = np.array([x + x ** 2 for x in arr])\n return np.concatenate((matrix, arr), axis=1)", "def scale(matrix, s):\n x, y = matrix.shape\n print(matrix.shape)\n\n print(np.ones(matrix.shape))\n # b = np.ones(x, y)\n # print(\"ones:\", b)\n return np.kron(a, np.ones((x, y)))", "def _surface_metric_matrices_from_one_forms(one_forms):\n ndim = one_forms.ndim\n transpose_axes = tuple(range(ndim - 2)) + tuple(reversed(range(ndim - 2, ndim)))\n transposed_one_forms = gs.transpose(one_forms, axes=transpose_axes)\n return gs.matmul(one_forms, transposed_one_forms)", "def snorm(x):\n return np.dot(x.flatten().T, x.flatten())", "def row_matrix_col_4d(a, b, A):\n\n\treturn (a[0]*A[0][0]*b[0] + a[1]*A[1][0]*b[0] + a[2]*A[2][0]*b[0] + a[3]*A[3][0]*b[0] +\n\t a[0]*A[0][1]*b[1] + a[1]*A[1][1]*b[1] + a[2]*A[2][1]*b[1] + a[3]*A[3][1]*b[1] +\n\t a[0]*A[0][2]*b[2] + a[1]*A[1][2]*b[2] + a[2]*A[2][2]*b[2] + a[3]*A[3][2]*b[2] +\n\t a[0]*A[0][3]*b[3] + a[1]*A[1][3]*b[3] + a[2]*A[2][3]*b[3] + a[3]*A[3][3]*b[3])", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def pattern_mat(x, m):\n x = np.asarray(x).ravel()\n if m == 1:\n return x\n else:\n N = len(x)\n patterns = np.zeros((m, N-m+1))\n for i in range(m):\n patterns[i, :] = x[i:N-m+i+1]\n return patterns", "def cat_matrices2D(mat1, mat2, axis=0):\n new_matrix = []\n if axis == 0:\n if len(mat1[0]) != len(mat2[0]):\n return None\n for row in mat1:\n new_matrix.append(list(row))\n for row in mat2:\n new_matrix.append(list(row))\n if axis == 1:\n if len(mat1) != len(mat2):\n return None\n for row, element in zip(mat1, 
mat2):\n new_matrix.append(list(row) + element)\n return new_matrix", "def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN", "def _mps_AA(self, A1, A2):\n Dl, d1, _ = A1.shape\n _, d2, Dr = A2.shape\n return np.reshape(np.tensordot(A1, A2, axes=(2, 0)), [Dl, d1 * d2, Dr])", "def _prod_vectorized(M1, M2):\n sh1 = M1.shape\n sh2 = M2.shape\n assert len(sh1) >= 2\n assert len(sh2) >= 2\n assert sh1[-1] == sh2[-2]\n\n ndim1 = len(sh1)\n t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]\n return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *\n M2[..., np.newaxis, :], -3)", "def process_matrix(row_count, column_count, operator):\n\n result = []\n for i in range(row_count):\n row = []\n for j in range(column_count):\n row.append(operator(i, j))\n result.append(row)\n\n return result", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def mk_single_diffy():\n # make matrix:\n mat = zeros((M, M), dtype='d')\n for m in range(M):\n for p in range(m+1, M, 2):\n mat[m,p] = 2*p*oneOverC[m]\n\n return mat", "def multiply_matrices(a, b):\n try:\n x = len(b[0])\n except:\n b = make_2D(b)\n try:\n x = len(a[0])\n except:\n a = make_2D(a)\n if len(a[0]) != len(b):\n print 'error: matrices cannot be multiplied'\n return\n out = np.zeros((len(a), len(b[0])))\n for i in range(len(out)):\n for j in range(len(out[0])):\n sum = 0\n for k in range(len(a[i])):\n sum += a[i][k] * b[k][j]\n out[i][j] = sum\n return out", "def multiM(*args):\r\n filas_1,filas_2 = len(args[0]),len(args[1])\r\n columnas_1,columnas_2 = len(args[0][0]),len(args[1][0])\r\n matriz_r = []\r\n for k in range(filas_1):\r\n matriz_r.append([0]*columnas_2)\r\n for i in range(columnas_2):\r\n matriz_r[k][i] = 0\r\n for i in range(filas_1):\r\n for j in range(columnas_1):\r\n for k in range(columnas_2):\r\n matriz_r[i][k] = matriz_r[i][k] + args[0][i][j] * args[1][j][k]\r\n return matriz_r", "def matrix_multiply(m1, m2):\n\n\tproduct = numpy.matmul(m1, m2)\n\tif type(product) == numpy.int64:\n\t\treturn float(product)\n\telse:\n\t\tresult = list(product)\n\t\treturn result", "def row_matrix_col(a, b, A):\n\n\treturn (a[0]*A[0][0]*b[0] + a[1]*A[1][0]*b[0] + a[2]*A[2][0]*b[0] + \n\t a[0]*A[0][1]*b[1] + a[1]*A[1][1]*b[1] + a[2]*A[2][1]*b[1] + \n\t a[0]*A[0][2]*b[2] + a[1]*A[1][2]*b[2] + a[2]*A[2][2]*b[2])", "def frobenius_inner_prod(mat1, mat2):\n assert mat1.shape==mat2.shape\n # assert isinstance(mat1, Variable) and isinstance(mat2, Variable))\n f = mat1.mul(mat2).sum()\n return f", "def hadmardProduct(self, a,b):\n\t\tnumRows = len(a)\n\t\tnumCols = len(a[0])\n\t\n\t\treturn [[a[j][i] * b[j][i] for i in range(numCols)] for j in range(numRows)]", "def multiply_matrices(list):\n # Section 1: Start matrix product using 1st matrix in list\n matrix_product = list[0]\n\n # Section 2: Loop thru list to create product\n for matrix in list[1:]:\n matrix_product = matrix_multiply(matrix_product, matrix)\n\n return matrix_product", "def __mul__(self, other):\n if self.n != other.m:\n raise TypeError(\"Illegal dimensions for mul operator\")\n tmp = [[0 for _ in xrange(self.n)] for _ in xrange(other.m)]\n for i in xrange(self.n):\n for j in xrange(other.m):\n for k in 
xrange(other.n):\n tmp[i][j] += self.values[i][k] * other.values[k][j]\n res = []\n for i in tmp:\n res += i\n return simplematrix(self.n, other.m, res)", "def chain_matmul_square(As):\n\n As_matmul = As\n while As_matmul.shape[0] > 1:\n if As_matmul.shape[0] % 2:\n A_last = As_matmul[-1:]\n else:\n A_last = None\n \n As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2])\n if A_last is not None:\n As_matmul = torch.cat([As_matmul, A_last], dim=0)\n \n return As_matmul.squeeze(0)", "def cayley_menger_mat(x2, y2, z2, xb2, yb2, zb2):\n one = np.ones_like(x2)\n zero = np.zeros_like(x2)\n mat = np.array([[zero, x2, y2, z2, one], \n [x2, zero, zb2, yb2, one], \n [y2, zb2, zero, xb2, one], \n [z2, yb2, xb2, zero, one], \n [one, one, one, one, zero]\n ]).T\n return mat", "def matrixMult( self, matrix0, matrix1 ):\r\n result = {}\r\n keys = sorted( set( matrix0.keys() ) )\r\n count = range( len( matrix0.keys() ) )\r\n \r\n for key in keys:\r\n result[ key ] = []\r\n for i in count:\r\n sum = 0\r\n for j in count:\r\n sum += matrix0[ key ][j] * matrix1[ keys[j] ][i]\r\n result[ key ].insert( i, sum )\r\n \r\n return result", "def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M", "def row_times_column(m1, row, m2, column):\n\n sum = 0\n for index in range(len(m1)):\n product = m1[row][index] * m2[index][column]\n sum += product\n return sum", "def commute_matrix(A):\n R = resistance_matrix(A)\n E = A.sum() / 2 # number of edges in graph\n C = 2 * E * R\n return C", "def add_matrices2D(mat1, mat2):\n if matrix_shape(mat1) != matrix_shape(mat2):\n return None\n shape = matrix_shape(mat1)\n matrix = []\n for axis1, axis2 in zip(mat1, mat2):\n matrix.append(add_arrays(axis1, axis2))\n return matrix", "def MultiplyMatrix(matrixA, matrixB):\r\n # result matrix initialized as singularity matrix\r\n result = [[0 for y in range(len(matrixB[0]))] for x in range(len(matrixA))]\r\n for i in range(len(matrixA)):\r\n # iterate through columns of Y\r\n for j in range(len(matrixB[0])):\r\n # iterate through rows of Y\r\n for k in range(len(matrixB)):\r\n result[i][j] += matrixA[i][k] * matrixB[k][j]\r\n return result", "def cat_matrices2D(mat1, mat2, axis=0):\n if (len(mat1[0]) == len(mat2[0])) and (axis == 0):\n concat = [ele.copy() for ele in mat1]\n concat += [ele.copy() for ele in mat2]\n return concat\n elif (len(mat1) == len(mat2)) and (axis == 1):\n concat = [mat1[j] + mat2[j] for j in range(len(mat1))]\n return concat\n else:\n return None", "def python_square_matrix(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[0],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[0]):\n # transposed_matrix[i,j] = matrix[j,i]\n transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n #Testing\n if not(np.allclose(transposed_matrix,np.transpose(matrix))):\n print(transposed_matrix)\n\n # print('python transpose time: %.2E' % end)\n return [transposed_matrix, end]", "def add_matrices2D(mat1, mat2):\n\n if matrix_shape(mat1) != matrix_shape(mat2):\n return None\n\n range_ax0 = range(len(mat1)) # range of axis 0\n range_ax1 = range(len(mat1[0])) # range of axis 1\n\n return [[mat1[i][j] + mat2[i][j] for j in range_ax1] for i in range_ax0]" ]
[ "0.74826276", "0.7294018", "0.6634914", "0.6595926", "0.62142396", "0.5985415", "0.59326464", "0.58786714", "0.5865172", "0.5857749", "0.58491534", "0.5798571", "0.5797253", "0.57930493", "0.57865286", "0.57813066", "0.57709336", "0.57322204", "0.5704064", "0.5663638", "0.5653181", "0.5643115", "0.5623832", "0.56153333", "0.5583555", "0.55788", "0.5569403", "0.5546664", "0.55460435", "0.5541713", "0.55014354", "0.5499286", "0.5475835", "0.54653025", "0.5454078", "0.54442793", "0.5419662", "0.54076743", "0.5377725", "0.5368803", "0.5361725", "0.5356933", "0.5344811", "0.5342129", "0.532544", "0.5322299", "0.53211844", "0.5319832", "0.53178275", "0.5301389", "0.5285707", "0.5285187", "0.5266845", "0.5260758", "0.52591664", "0.5254037", "0.52435106", "0.52276844", "0.52141124", "0.5188914", "0.5182223", "0.5179323", "0.51643175", "0.51628023", "0.51574415", "0.51329607", "0.51257557", "0.5121921", "0.511521", "0.510496", "0.5103772", "0.51001793", "0.5093926", "0.50928617", "0.5090669", "0.5088742", "0.50837785", "0.5079877", "0.507776", "0.5072599", "0.50715786", "0.5070323", "0.5070017", "0.5065328", "0.506243", "0.5046779", "0.5046372", "0.50429356", "0.5042517", "0.50415987", "0.5040749", "0.50350493", "0.50325584", "0.5032039", "0.50318533", "0.5030212", "0.5029228", "0.5024405", "0.5020256", "0.5014971" ]
0.6709533
2
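A small self-contained check of the identity the record above relies on, using assumed example data (shapes and variable names are illustrative only): |p_i - p_j|_2^2 = |p_i|_2^2 + |p_j|_2^2 - 2 <p_i, p_j>, applied entrywise to a square Gram matrix.

import numpy as np

patterns = np.random.rand(3, 5)                  # 3 hypothetical flattened patterns
gram = patterns @ patterns.T                     # square inner product matrix

sq_norm = np.reshape(np.diag(gram), (3, 1))
dist_sq = sq_norm + np.transpose(sq_norm) - 2 * gram   # same computation as inner_product_to_L2_square(gram)

brute = np.array([[np.sum((a - b) ** 2) for b in patterns] for a in patterns])
assert np.allclose(dist_sq, brute)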
Turns the inner product matrix into the |pattern1 - pattern2|_{2}^2 matrix. Here the pattern is normalized. Therefore, it can be reformulated into |pattern1 - pattern2|_{2}^2 = 2 - 2 <pattern1, pattern2> / (|pattern1| |pattern2|)
def inner_product_to_normalized_L2_square(matrix):
    length = matrix.shape[0]
    norm = np.divide(1, np.sqrt(l2_square_from_inner_product(matrix)))
    normalized_inner_product = np.multiply(np.multiply(np.reshape(norm, [length, 1]), matrix),
                                           np.reshape(norm, [1, length]))
    return 2 - 2 * normalized_inner_product
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inner_product(pattern_one, pattern_two):\n\n return np.sum(np.multiply(pattern_one, pattern_two))", "def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder", "def inner_product_to_L2_square(matrix):\n\n length = matrix.shape[0]\n squared_norm = np.reshape(np.diag(matrix), (length, 1))\n\n return squared_norm + np.transpose(squared_norm) - 2 * matrix", "def l2_square_from_inner_product(matrix):\n return np.diag(matrix)", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def snorm(x):\n return np.dot(x.flatten().T, x.flatten())", "def matrix_subs(matrix_2x2, point):\n arr = []\n for el in matrix_2x2:\n arr.append(el.subs(x1, point[0]).subs(x2, point[1]))\n \n M = Matrix([[arr[0], arr[1]], [arr[2], arr[3]]])\n\n return M", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def _inner_product_d1(\n self, one_forms_a, one_forms_b, one_forms_bp, areas_bp, inv_surface_metrics_bp\n ):\n one_forms_bp_t = gs.transpose(one_forms_bp, (0, 2, 1))\n\n one_forms_a_t = gs.transpose(one_forms_a, (0, 1, 3, 2))\n xa = one_forms_a_t - one_forms_bp_t\n\n xa_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xa, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xa),\n )\n\n one_forms_b_t = gs.transpose(one_forms_b, (0, 1, 3, 2))\n xb = one_forms_b_t - one_forms_bp_t\n xb_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xb, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xb),\n )\n\n return self.d1 * gs.sum(\n gs.einsum(\n \"...bii->...b\",\n gs.matmul(\n xa_0,\n gs.matmul(\n inv_surface_metrics_bp, gs.transpose(xb_0, axes=(0, 1, 3, 2))\n ),\n ),\n )\n * areas_bp\n )", "def matrix_mult(m1, m2):\n\ttemp = []\n\tfor i in range(len(m1)):\n\t\te = []\n\t\tfor j in range(len(m2[0])):\n\t\t\te.append(row_times_column(m1,i,m2,j))\n\t\ttemp.append(e)\n\treturn temp", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def l2_norm(pattern):\n return np.linalg.norm(pattern)", "def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)", "def Mxform(x1,y1,x2,y2):\n return Jones.toMueller([[np.dot(x2,x1), np.dot(x2, y1)], [np.dot(y2,x1), np.dot(y2,y1)]])", "def strassen(m1, m2):\n \n if ((m1.shape[0] % 2 == 0) or (m1.shape[0] == 1)):\n n = m1.shape[0] \n else:\n n = m1.shape[0] + 1\n result = np.zeros((n, n), dtype = int)\n \n if (n == 1):\n result[0][0] = m1[0][0] * m2[0][0]\n else:\n new = n//2\n \n a11, a12, a21, a22 = m1[:new, :new], m1[new:, :new], m1[:new, new:], m1[new:, new:]\n b11, b12, b21, b22 = m2[:new, :new], m2[new:, :new], m2[:new, new:], m2[new:, new:]\n \n p1 = strassen(a11, b12 - b22)\n p2 = strassen(a11 + a12, b22)\n p3 = strassen(a21 + a22, b11)\n p4 = strassen(a22, b21 - b11)\n p5 = strassen(a11 + a22, b11 + b22)\n p6 = strassen(a12 - a22, b21 + b22)\n p7 = strassen(a11 - a21, b11 + b12)\n \n result[:new, :new] = p5 + p4 - p2 + p6\n result[new:, :new] = p1 + p2\n result[:new, new:] = p3 + p4 \n result[new:, new:] = p5 + p1 - p3 - p7\n \n return result", "def __mul__(self, 
other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def strassen_dot(x, y):\n# X = (A B) Y = (E F)\n# (C D) (G H)\n# \n# A through H -- (n/2 x n/2) matrices\n# \n# let P1 = A(F-H), P2 = (A+B)H, P3 = (C+D)E, P4 = D(G-E), \n# \n# P5 = (A+D)(E+H), P6 = (B-D)(G+H), P7 = (A-C)(E+F)\n# \n# XY = (AE+BG AF+BH) = (P5+P4-P2+P6 P1+P2 )\n# (CE+DG CF+DH) (P3+P4 P1+P5-P3-P7)\n\n n = x.shape[0]\n originalN = n\n \n # base case (n<=8)\n if n <= 8:\n return brute_multiply(x, y)\n \n n_is_power_of_two = (n & (n-1) == 0)\n\n if not n_is_power_of_two:\n # count the closest power of 2 above n\n # and resize x and y matrices\n newN = upper_pow_2(n)\n \n n = newN\n \n nX = np.zeros( (n, n) )\n# mapping x on to nX:\n nX[:originalN, :originalN] = x\n x = nX\n \n nY = np.zeros( (n, n) )\n# mapping y on to nY:\n nY[:originalN, :originalN] = y\n y = nY\n \n \n a = x[ 0:n//2, 0:n//2 ]\n b = x[ 0:n//2, n//2: ]\n c = x[ n//2:, 0:n//2 ]\n d = x[ n//2:, n//2: ]\n\n e = y[ 0:n//2, 0:n//2 ]\n f = y[ 0:n//2, n//2: ]\n g = y[ n//2:, 0:n//2 ]\n h = y[ n//2:, n//2: ]\n \n# let P1 = A(F-H), P2 = (A+B)H, P3 = (C+D)E, P4 = D(G-E), \n# \n# P5 = (A+D)(E+H), P6 = (B-D)(G+H), P7 = (A-C)(E+F)\n \n # recursively compute 7 (SEVEN) products P1 .. P7\n p1 = strassen_dot(a, f-h)\n p2 = strassen_dot(a+b, h)\n p3 = strassen_dot(c+d, e)\n p4 = strassen_dot(d, g-e)\n p5 = strassen_dot(a+d, e+h)\n p6 = strassen_dot(b-d, g+h)\n p7 = strassen_dot(a-c, e+f)\n \n# do the necessary (clever) additions & substractions with P1..P7\n \n# XY = (AE+BG AF+BH) = (P5+P4-P2+P6 P1+P2 )\n# (CE+DG CF+DH) (P3+P4 P1+P5-P3-P7)\n res1 = np.hstack( (p5+p4-p2+p6, p1+p2) )\n res2 = np.hstack( (p3+p4, p1+p5-p3-p7) )\n res = np.vstack( (res1, res2) )\n\n return res[:originalN, :originalN]", "def inner_product_similarity(a: torch.Tensor, b: torch.Tensor, dim=1) -> torch.Tensor:\n outputs = (a * b).sum(dim=dim)\n return outputs", "def normalize(self):\n det = self._mat[0][0]*self._mat[1][1] - self._mat[0][1]*self._mat[1][0]\n for i in range(2):\n for j in range(2):\n self._mat[i][j] = (self._mat[i][j])/(np.sqrt(det))", "def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def StrassenMatrixM(a, b):\r\n if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:\r\n raise Exception('Matrices should be 2x2!')\r\n print(a[0][0] * b[0][1] + a[0][1] * b[1][1])\r\n matrix = [[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],\r\n [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]]]\r\n\r\n return matrix", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n 
return z", "def outer_product(A, B): \n print(A)\n print(B)\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n \n if A_columns == 1 and B_rows == 1:\n \n outer_product = []\n\n # multi-line list comprehension for outer product\n [outer_product.append([A[i][0] * B[0][j] for j in range(B_columns)]) \n for i in range(A_rows)]\n\n return outer_product\n\n else:\n print(\"dimensions of vector do not match.\")", "def solve_2x2(matrix: FieldMatrix,\n rhs: Fields) -> OutputFields:\n _validate_matrix_shape(matrix, (2, 2))\n\n a, b = matrix[0]\n c, d = matrix[1]\n e, f = rhs\n\n inv_factor = det_2x2(matrix)\n\n return [\n tf.nest.map_structure(tf.math.divide_no_nan, det_2x2([\n [e, b],\n [f, d],\n ]), inv_factor),\n tf.nest.map_structure(tf.math.divide_no_nan, det_2x2([\n [a, e],\n [c, f],\n ]), inv_factor),\n ]", "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def prod_mat(self,other):\n [rs,cs],[ro,co] = self.D,other.D\n assert cs == ro, \"tailles incompatibles\"\n return Mat([rs,co], lambda i,j : prod_scal(self.ligne(i),other.col(j)))", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def pfm(pattern_sequence):\n if isinstance(pattern_sequence, Pattern):\n sequences = []\n for match_sequences in pattern_sequence.matchtable_pset.match_sequences.itervalues():\n for strand, sequence in match_sequences:\n if strand == 2:\n sequences.append(revcomp(sequence))\n else:\n sequences.append(sequence)\n else:\n sequences = pattern_sequence\n\n ncol = len(sequences[0])\n matrix = {\n 'a': [0] * ncol,\n 't': [0] * ncol,\n 'c': [0] * ncol,\n 'g': [0] * ncol,\n }\n total = [0] * ncol\n\n for s in sequences:\n for i, j in enumerate(s):\n matrix.get(j)[i] += 1\n total[i] += 1\n\n # Normalization\n for i in xrange(ncol):\n matrix.get('a')[i] = float(matrix.get('a')[i]) / total[i]\n matrix.get('t')[i] = float(matrix.get('t')[i]) / total[i]\n matrix.get('c')[i] = float(matrix.get('c')[i]) / total[i]\n matrix.get('g')[i] = float(matrix.get('g')[i]) / total[i]\n\n return matrix", "def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), 
recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11", "def outer_product(x):\n return keras.backend.batch_dot(\n x[0]\n , x[1]\n , axes=[1,1]\n ) / x[0].get_shape().as_list()[1]", "def _inner_product_c1(self, point_a, point_b, normals_bp, areas_bp):\n dna = self._space.normals(point_a) - normals_bp\n dnb = self._space.normals(point_b) - normals_bp\n return self.c1 * gs.sum(\n gs.einsum(\"...bi,...bi->...b\", dna, dnb) * areas_bp, axis=-1\n )", "def robust_outer_product(vec_1, vec_2):\n mantissa_1, exponents_1 = np.frexp(vec_1)\n mantissa_2, exponents_2 = np.frexp(vec_2)\n new_mantissas = mantissa_1[None, :] * mantissa_2[:, None]\n new_exponents = exponents_1[None, :] + exponents_2[:, None]\n return new_mantissas * np.exp2(new_exponents)", "def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);", "def matrix_mult(m1, m2):\n pass", "def generate_pattern_grid(words1, words2):\n # Convert word lists to integer arrays\n w1, w2 = (\n np.array([[ord(c) for c in w] for w in words], dtype=np.uint8)\n for words in (words1, words2)\n )\n\n if len(w1) == 0 or len(w2) == 0:\n return np.zeros((len(w1), len(w2)), dtype=np.uint8)\n\n # equality_grid[a, b, i, j] represents whether the ith letter\n # of words1[a] equals the jth letter of words2[b]\n equality_grid = np.zeros((len(w1), len(w2), 5, 5), dtype=bool)\n for i, j in it.product(range(5), range(5)):\n equality_grid[:, :, i, j] = np.equal.outer(w1[:, i], w2[:, j])\n\n patterns = np.zeros((len(w1), len(w2)), dtype=np.uint8)\n three_pows = (3**np.arange(5)).astype(np.uint8)\n for i, tp in enumerate(three_pows):\n # This accounts for yellow squares\n patterns[:, :] += tp * equality_grid[:, :, i, :].any(2)\n # This accounts for green squares\n patterns[:, :] += tp * equality_grid[:, :, i, i]\n\n return patterns", "def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))", "def mult(m1, m2):\n assert np.shape(m1) == (2, 3)\n assert np.shape(m2) == (2, 3)\n\n m1_temp = np.vstack((m1, [0, 0, 1]))\n m2_temp = np.vstack((m2, [0, 0, 1]))\n result = m1_temp * m2_temp\n\n return result[:2, :]", "def mat_mul(mat1, mat2):\n\n rows1 = len(mat1)\n cols1 = len(mat1[0])\n rows2 = len(mat2)\n cols2 = len(mat2[0])\n\n if cols1 != rows2:\n return None\n 
else:\n new_matrix = []\n for x in range(rows1):\n aux_row = []\n for y in range(cols2):\n aux_sum = []\n for z in range(cols1):\n aux_sum.append(mat1[x][z] * mat2[z][y])\n aux_row.append(sum(aux_sum))\n new_matrix.append(aux_row)\n\n return new_matrix", "def frobenius_inner_prod(mat1, mat2):\n assert mat1.shape==mat2.shape\n # assert isinstance(mat1, Variable) and isinstance(mat2, Variable))\n f = mat1.mul(mat2).sum()\n return f", "def transformation_2d(vertices, kernels=KERNELS):\n\t# calculate the transpose matrix of vertices\n\ttranspose = vertices.transpose()\n\t# insert a row of ones in the transpose matrix's end, then insert the result in 'matrices' list\n\tkernels.append(np.append(transpose, [np.ones(len(transpose[0]))], axis=0))\n\t# multiply matrices into 'kernels' list,\n\t# remove the last row (of ones) and calculate the transpose matrix of the result\n\tfinal_transformation_result = np.delete(np.linalg.multi_dot(kernels), 2, 0).transpose()\n\tKERNELS.clear()\n\treturn final_transformation_result", "def classical(m1,m2):\n \n n = m1.shape\n result = np.zeros(n, dtype = int)\n\n for i in range(n[0]):\n for j in range(n[0]):\n for k in range(n[0]):\n result[i][j] += m1[i][k] * m2[k][j]\n return result", "def complex_mul2d(a, b):\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def __mul__(self,m):\n if type(m) != Matrix:\n raise TypeError('The second argument is not a matrix lol')\n if self.ncols != m.nrows:\n raise ValueError('matrix dot argument has incorrect number of rows')\n new = Matrix(self.nrows,m.ncols)\n columns = m.getCols()\n rowindex = 0\n colindex = 0 \n for row in self.matrix:\n colindex = 0 \n for col in columns:\n summ = 0\n for i,j in zip(row,col):\n summ+= i*j \n new.matrix[rowindex][colindex] = summ\n print new.matrix\n colindex += 1 \n rowindex+=1\n return new", "def matrix_mult(A,B):\n\n m = len(A)\n p = len(B)\n n = len(B[0])\n AB = []\n for i in range(m):\n AB.append([])\n for j in range(n):\n total = 0\n for k in range(p):\n total += A[i][k] * B[k][j]\n AB[i].append(total)\n return AB", "def _e_2d_(p, a):\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)", "def mul(self,mat1,mat2):\n if(isinstance(mat2,int)==True):\n result = [[mat1[i][j] * mat2 for j in range(len(mat1[0]))] for i in range(len(mat1))]\n self.out = result\n return self.out\n elif(len(mat1[0])==len(mat2)):\n result = [[sum(a*b for a,b in zip(i,j)) for j in zip(*mat2)] for i in mat1]\n self.out = result\n return self.out", "def mmultiply(self, matrix):\n try:\n result_matrix = [[0 for row in range(len(self.matrix))] for col in range(len(matrix[0]))]\n for i in range(len(self.matrix)):\n for j in range(len(matrix[0])):\n for k in range(len(matrix)):\n result_matrix[i][j] += self.matrix[i][k] * matrix[k][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass", "def test_two_qubit_weyl_decomposition_bgate(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, np.pi / 8, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def matrix_as_psfm(matrix):\n return [[2**(ep-2) for ep in row] for row in matrix]", "def multiply_by_left_matrix(matrix, img):\n first = np.inner(matrix[0], img)\n second = np.inner(matrix[1], img)\n third = np.inner(matrix[2], img)\n\n res = np.dstack((first, second, third))\n return res", "def 
matrixMul(self, matrix, matrix2):\n matrix0 = matrix[:]\n matrix[0] = matrix0[0] * matrix2[0] + matrix0[2]*matrix2[1] # + matrix0[4]*0\n matrix[1] = matrix0[1] * matrix2[0] + matrix0[3]*matrix2[1] # + matrix0[5]*0\n matrix[2] = matrix0[0] * matrix2[2] + matrix0[2]*matrix2[3] # + matrix0[4]*0\n matrix[3] = matrix0[1] * matrix2[2] + matrix0[3]*matrix2[3] # + matrix0[5]*0\n matrix[4] = matrix0[0] * matrix2[4] + matrix0[2]*matrix2[5] + matrix0[4]\n matrix[5] = matrix0[1] * matrix2[4] + matrix0[3]*matrix2[5] + matrix0[5]", "def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i", "def test_l2_metric_inner_product_vectorization(\n self,\n l2_metric_s2,\n times,\n n_landmark_sets,\n landmarks_a,\n landmarks_b,\n landmarks_c,\n ):\n landmarks_ab = l2_metric_s2.geodesic(landmarks_a, landmarks_b)\n landmarks_bc = l2_metric_s2.geodesic(landmarks_b, landmarks_c)\n landmarks_ab = landmarks_ab(times)\n landmarks_bc = landmarks_bc(times)\n\n tangent_vecs = l2_metric_s2.log(point=landmarks_bc, base_point=landmarks_ab)\n\n result = l2_metric_s2.inner_product(tangent_vecs, tangent_vecs, landmarks_ab)\n\n self.assertAllClose(gs.shape(result), (n_landmark_sets,))", "def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN", "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = 
torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def _prod_vectorized(M1, M2):\n sh1 = M1.shape\n sh2 = M2.shape\n assert len(sh1) >= 2\n assert len(sh2) >= 2\n assert sh1[-1] == sh2[-2]\n\n ndim1 = len(sh1)\n t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]\n return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *\n M2[..., np.newaxis, :], -3)", "def _mps_AA(self, A1, A2):\n Dl, d1, _ = A1.shape\n _, d2, Dr = A2.shape\n return np.reshape(np.tensordot(A1, A2, axes=(2, 0)), [Dl, d1 * d2, Dr])", "def hadmardProduct(self, a,b):\n\t\tnumRows = len(a)\n\t\tnumCols = len(a[0])\n\t\n\t\treturn [[a[j][i] * b[j][i] for i in range(numCols)] for j in range(numRows)]", "def _inner_product_a1(self, ginvdga, ginvdgb, areas_bp):\n return self.a1 * gs.sum(\n gs.einsum(\"...bii->...b\", gs.matmul(ginvdga, ginvdgb)) * areas_bp,\n axis=-1,\n )", "def divide(m1,m2):\n \n if ((m1.shape[0] % 2 == 0) or (m1.shape[0] == 1)):\n n = m1.shape[0]\n else:\n n = m1.shape[0] + 1\n result = np.zeros((n, n), dtype = int)\n \n if (n == 1):\n result[0][0] = m1[0][0] * m2[0][0]\n else:\n new = n//2\n \n a11, a12, a21, a22 = m1[:new, :new], m1[new:, :new], m1[:new, new:], m1[new:, new:]\n b11, b12, b21, b22 = m2[:new, :new], m2[new:, :new], m2[:new, new:], m2[new:, new:]\n \n result[:new, :new] = divide(a11,b11) + divide(a12,b21)\n result[new:, :new] = divide(a11,b12) + divide(a12,b22)\n result[:new, new:] = divide(a21,b11) + divide(a22,b21)\n result[new:, new:] = divide(a21,b12) + divide(a22,b22)\n \n return result", "def get_molecular_matrix(single_body, two_body):\n x, y = single_body.shape\n func = np.vectorize(round_custom)\n _new_dim = x * y\n single_one_dim = single_body.reshape(_new_dim, 1)\n two_body_two_dim = func(two_body.reshape(_new_dim, _new_dim))\n idx = 0\n x, _ = two_body_two_dim.shape\n while idx < x:\n two_body_two_dim[idx][idx] = round_custom(single_one_dim[idx][0])\n idx += 1\n return two_body_two_dim", "def innerprod_q2(q1, q2):\n T = q1.shape[1]\n val = sum(sum(q1 * q2)) / T\n\n return (val)", "def matrix_mult(m1, m2):\n output = []\n for rowIndex, row in enumerate(m1): #go through rows in m1\n new_row = []\n for columnIndex in range(len(m2[0])): #go through indices for each column of m2\n sum = 0\n for index3 in range(len(row)):\n product = m1[rowIndex][index3] * m2[index3][columnIndex]\n sum += product\n new_row.append(sum)\n output.append(new_row)\n return output\n \n \n #output = []\n #first for loop corresponds to the rows of my output matrix and loops through the rows of m1 (enumerate)\n #create an empty new row\n # second for loop, loops through columns of m2\n # create sum variable, initialize it with zero\n # third for loop, multiplies the index of the row in m1 times the index of the column in m2\n # add sum to product and assign this to the sum variable\n # append sum to new row\n # append new row to output\n # return output", "def matrix_mult_matrix(matrix_a, matrix_b):\n m = len(matrix_a)\n n = len(matrix_b)\n result = []\n matrix_b_t = transpose_matrix(matrix_b)\n for i in xrange(m):\n row = []\n\tfor j in xrange(m):\n row.append(dot_product(matrix_a[i], matrix_b_t[j]))\n\tresult.append(row)\n return result", "def multiplicand_2(p):\n m2 = cddr(p) # (m2 m3 ...)\n rests = cdr(m2) # (m3...)\n if isNull(rests):\n return car(m2)\n else:\n restp = convertToPythonList(cdr(rests))\n return make_product_2(car(m2), car(rests), *restp)", "def similarity_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 
0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n mu = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n mu += w[i]*(p_adj.dot(p_adj))\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj/mu\r\n v_out += q_wgt\r\n return v_out", "def outer_subspace_product(subspace1: list, subspace2: list, reverse: bool = False) -> list:\n assert isinstance(subspace1, list), 'Please provide subspace as a list of str.'\n assert isinstance(subspace2, list), 'Please provide subspace as a list of str.'\n\n product_subspace = []\n if subspace1 == []: return subspace2\n if subspace2 == []: return subspace1\n\n for basisvector1 in subspace1:\n for basisvector2 in subspace2:\n if reverse:\n product_subspace += [basisvector2 + basisvector1]\n else:\n product_subspace += [basisvector1 + basisvector2]\n return product_subspace", "def mosaicing(img, pattern):\n imout = np.zeros((img.shape[0], img.shape[1]))\n for i in range(2):\n for j in range(2):\n imout[i::2, j::2] = img[i::2, j::2, pattern[i][j]]\n \n return imout", "def mat_mul(mat1, mat2):\n\n if len(mat1[0]) == len(mat2):\n\n mat2 = matrix_transpose(mat2)\n response = []\n\n for row in range(len(mat1)):\n response.append(\n [\n sum(dot_product(mat1[row], mat2[column]))\n for column in range(len(mat2))\n ]\n )\n\n return response\n\n else:\n return None", "def form_square_block_matrix(mat1,mat2):\n if mat1.cols==1:\n mat3 = mp.matrix(mat1.rows+mat2.rows,1)\n mat3[:mat1.rows] = mat1[:]\n mat3[mat1.rows:mat3.rows] = mat2[:]\n else:\n mat3 = mp.matrix(mat1.rows+mat2.rows, mat1.rows+mat2.rows)\n mat3[:mat1.rows,:mat1.rows] = mat1[:,:]\n mat3[mat1.rows:mat3.rows,mat1.rows:mat3.rows] = mat2[:,:]\n return mat3", "def _prepare_outer_matrix(self):\n self._mat_plane = numpy.array([\n self._scaling[0], 0, 0, 0,\n 0, self._scaling[1], 0, 0,\n 0, 0, 1, 0,\n self.i_border[0], -self.i_border[1], 0, 1\n ], dtype=numpy.float32)", "def pattern_modifier(pattern, X, y):\n mod_pattern = pattern.reshape(n_channels, n_samples)\n mod_pattern = mod_pattern * kernel[np.newaxis, :]\n return mod_pattern.reshape(pattern.shape)", "def pair_product(x1, x2):\n return np.multiply(x1, x2)", "def det_matrix_2x2(m: list):\n return m[0][0]*m[1][1] - m[0][1]*m[1][0]", "def __mul__(self,v2):\n\t\tif(isinstance(v2,Vect2D)):\n\t\t\treturn np.dot(self._vec,v2._vec)\n\t\telse:\n\t\t\treturn Vect2D(v2*self._vec)", "def rotate2D(self, matrix) -> None:\n N = len(matrix)\n\n # In case of N is odd, the innermost square belt is just one cell, no need of rotating.\n for i in range(0,int(N/2)): # outer loop for each square belt\t\t\t\n for j in range(i,N-i-1): # N-i group in the i-th square belt\n #print(i,j)\n tmp = matrix[i][j]\n matrix[i][j] = matrix[N-j-1][i]\n matrix[N-j-1][i] = matrix[N-i-1][N-j-1]\n matrix[N-i-1][N-j-1] = matrix[j][N-i-1]\n matrix[j][N-i-1] = tmp\n #print(matrix)", "def _surface_metric_matrices_from_one_forms(one_forms):\n ndim = one_forms.ndim\n transpose_axes = tuple(range(ndim - 2)) + tuple(reversed(range(ndim - 2, ndim)))\n transposed_one_forms = 
gs.transpose(one_forms, axes=transpose_axes)\n return gs.matmul(one_forms, transposed_one_forms)", "def basic_geometric_product(obj1, obj2):\n def mul_table(b1, b2):\n return MV.base_mul_table[(b1, b2)]\n\n obj12 = bilinear_product(obj1 * obj2, mul_table)\n\n return obj12", "def _inner_product_a2(\n self, tangent_vec_a, tangent_vec_b, base_point, vertex_areas_bp\n ):\n laplacian_at_base_point = self._space.laplacian(base_point)\n return self.a2 * gs.sum(\n gs.einsum(\n \"...bi,...bi->...b\",\n laplacian_at_base_point(tangent_vec_a),\n laplacian_at_base_point(tangent_vec_b),\n )\n / vertex_areas_bp,\n axis=-1,\n )", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def _inner_product_b1(self, ginvdga, ginvdgb, areas_bp):\n return self.b1 * gs.sum(\n gs.einsum(\"...bii->...b\", ginvdga)\n * gs.einsum(\"...bii->...b\", ginvdgb)\n * areas_bp,\n axis=-1,\n )", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def row_matrix_col_4d(a, b, A):\n\n\treturn (a[0]*A[0][0]*b[0] + a[1]*A[1][0]*b[0] + a[2]*A[2][0]*b[0] + a[3]*A[3][0]*b[0] +\n\t a[0]*A[0][1]*b[1] + a[1]*A[1][1]*b[1] + a[2]*A[2][1]*b[1] + a[3]*A[3][1]*b[1] +\n\t a[0]*A[0][2]*b[2] + a[1]*A[1][2]*b[2] + a[2]*A[2][2]*b[2] + a[3]*A[3][2]*b[2] +\n\t a[0]*A[0][3]*b[3] + a[1]*A[1][3]*b[3] + a[2]*A[2][3]*b[3] + a[3]*A[3][3]*b[3])", "def same_side_product(p, q, a, b):\n return line_ccw(a, b, p) * line_ccw(a, b, q)", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. 
/ tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def square_matrix_multiply(a, b):\n n = len(a)\n c = [[0]*n for _ in range(n)]\n for i in range(n):\n for j in range(n):\n sm = 0\n for k in range(n):\n sm += (a[i][k] * b[k][j])\n c[i][j] = sm\n\n return c", "def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]", "def _mult_poly_matrix_poly(p, mat_y):\n\n mult_op = lambda q: np.convolve(p, q)\n p_times_y = np.apply_along_axis(mult_op, 2, mat_y)\n return p_times_y", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def normalize_adj(adj):\n\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = np.diag(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)", "def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))", "def reconstruct(A, B, z):\n f = factorint(igcd(A, B))\n for p, e in f.items():\n if e != 1:\n raise ValueError('a and b should be square-free')\n z *= p\n return z", "def matrix_multiplication_loop(x_matrix, y_matrix):\n result = []\n for i, row in enumerate(x_matrix):\n row_vector = []\n for j in range(len(y_matrix[0])):\n product = 0\n for k in range(len(row)):\n product += x_matrix[i][k] * y_matrix[k][j]\n row_vector.append(product)\n result.append(row_vector)\n return result", "def norm2_r(self, a: np.ndarray) -> float:\n return a.T @ a" ]
[ "0.7459909", "0.69026655", "0.62501264", "0.59727603", "0.58457905", "0.56935406", "0.56712896", "0.5659509", "0.5638323", "0.562021", "0.55866134", "0.5575207", "0.5560609", "0.55025274", "0.5491025", "0.5439844", "0.5381539", "0.53541124", "0.5343363", "0.53310347", "0.532383", "0.5305266", "0.5295224", "0.5279774", "0.5244985", "0.5239988", "0.52263427", "0.5226059", "0.52230066", "0.52128536", "0.5181816", "0.5176417", "0.5169206", "0.5167986", "0.515937", "0.5158919", "0.5122471", "0.511444", "0.51063055", "0.5102178", "0.50925946", "0.50807035", "0.5063364", "0.5060758", "0.5054657", "0.50530577", "0.5046044", "0.5039231", "0.50386906", "0.5034785", "0.50297636", "0.50193775", "0.5017412", "0.49813205", "0.49807236", "0.49756384", "0.4971787", "0.49711654", "0.49709055", "0.49507028", "0.4949671", "0.4949146", "0.49441355", "0.49376434", "0.49333808", "0.49317148", "0.49287587", "0.4920873", "0.4915144", "0.49130973", "0.49079886", "0.49035653", "0.49025", "0.49016878", "0.490036", "0.48962334", "0.4892124", "0.48908907", "0.48675817", "0.48663434", "0.486304", "0.48628187", "0.48535436", "0.4853111", "0.48429668", "0.48423478", "0.48405617", "0.4834119", "0.48292828", "0.48246852", "0.48211947", "0.48206893", "0.4818812", "0.48181638", "0.48181638", "0.4814729", "0.4810796", "0.48060337", "0.48024935", "0.48022324" ]
0.5993504
3
Apply np.exp(-matrix / two_sigma_square) elementwise.
def gaussian_dense(matrix, two_sigma_square):
    return np.exp(-matrix / two_sigma_square)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exp(tensor):\n return _elementary_op(tensor, np.exp, np.exp)", "def expval(op, dm):\n return np.tensordot(op, dm, ([0, 1], [0, 1]))", "def Exp(A, B):\n return A.dot(expm(B))", "def kernel_sqExp(a,b, ls=1, sv=1):\n a = a.T/ls\n b = b.T/ls\n D, n = np.shape(a)\n d, m = np.shape(b)\n sqdist = np.tile((a**2).T, [1, m]) + np.tile(b*b, [n, 1]) - 2*np.dot(a.T,b)\n my_kernel = (sv**2) * np.exp(-0.5*sqdist)\n \n # written all out to illustrate (need to make sure a, b are in original dimensions):\n# my_kernel2 = np.zeros((n, m))\n# for i in range(n):\n# for j in range(m):\n# ai = a[i]\n# bj = b[j]\n# my_kernel2[i, j] = np.exp(-1/(2*ls**2) * (ai-bj)**2 )\n# my_kernel2 = my_kernel2 * (sv**2)\n \n return my_kernel", "def expms(A, eig=np.linalg.eigh):\r\n # TODO: check that this works reliably for low rank matrices\r\n # first: symmetrize A\r\n D, B = eig(A)\r\n return np.dot(B, (np.exp(D) * B).T)", "def exponentialfcn(x: np.ndarray) -> np.ndarray:\n\n x2 = x**2\n scores = -np.exp(-0.5 * np.sum(x2, axis=1))\n return scores", "def ga_exp(B):\n if np.sum(np.abs(B.value)) < np.finfo(float).eps:\n return cf.MultiVector(layout, unit_scalar_mv.value)\n return cf.MultiVector(layout, val_exp(B.value))", "def gaussian_smearing(E: NDArray[Shape['Ngrid'], Number],\n E0: NDArray[Shape['*, ...'], Number],\n sigma: float):\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -(np.broadcast_to(E, E0.shape + E.shape) - np.expand_dims(E0, len(E0.shape))) ** 2 / (2 * sigma ** 2))", "def exp_env(N, sr, lam = 3):\n return np.exp(-lam*np.arange(N)/sr)", "def get_est_exp_discount_function(self,params):\n params = params[0:5]\n df = pd.DataFrame(self.maturity.apply(lambda x: x ** i) for i in range(1, 6)).T\n df.columns = ['M1', 'M2', 'M3', 'M4', 'M5']\n return np.exp(df.dot(params))", "def sum_exp(self,time):\n sum = np.sum([self.A[i]*np.exp(-1.*self.lambdas[i]*time)\\\n for i in xrange(self.m)])\n return sum", "def compute_MSE(e):\n\n return 1/2*np.mean(e**2)", "def gauss_2d(N, sigma = 0.25):\r\n x, y = make_xy(N)\r\n sigma_pixel = sigma * np.float(N)\r\n arrayout = np.exp(-(x**2 + y**2) / sigma_pixel**2) / (np.pi * sigma_pixel**2)\r\n return arrayout", "def calculate_mse(e):\n return 1/2*np.mean(e.dot(e))", "def test_perform_sigm_times_exp(self):\r\n x, y, z, t = tensor.vectors('x', 'y', 'z', 't')\r\n exp = tensor.exp\r\n\r\n def ok(expr1, expr2):\r\n trees = [parse_mul_tree(e) for e in (expr1, expr2)]\r\n perform_sigm_times_exp(trees[0])\r\n trees[0] = simplify_mul(trees[0])\r\n good = theano.gof.graph.is_same_graph(\r\n compute_mul(trees[0]),\r\n compute_mul(trees[1]))\r\n if not good:\r\n print trees[0]\r\n print trees[1]\r\n print '***'\r\n theano.printing.debugprint(compute_mul(trees[0]))\r\n print '***'\r\n theano.printing.debugprint(compute_mul(trees[1]))\r\n assert good\r\n ok(sigmoid(x) * exp(-x), sigmoid(-x))\r\n ok(-x * sigmoid(x) * (y * (-1 * z) * exp(-x)),\r\n -x * sigmoid(-x) * (y * (-1 * z)))\r\n ok(-sigmoid(-x) *\r\n (exp(y) * (-exp(-z) * 3 * -exp(x)) *\r\n (y * 2 * (-sigmoid(-y) * (z + t) * exp(z)) * sigmoid(z))) *\r\n -sigmoid(x),\r\n sigmoid(x) *\r\n (-sigmoid(y) * (-sigmoid(-z) * 3) * (y * 2 * ((z + t) * exp(z)))) *\r\n -sigmoid(x))\r\n ok(exp(-x) * -exp(-x) * (-sigmoid(x) * -sigmoid(x)),\r\n -sigmoid(-x) * sigmoid(-x))\r\n ok(-exp(x) * -sigmoid(-x) * -exp(-x),\r\n -sigmoid(-x))", "def matlab_style_gauss2D(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. 
* sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def func(self, X, a, b):\n return a*np.exp(-b*X)", "def cal_gaussian_process(b, sigma2, X_train, y_train, X_test):\n n = X_train.shape[0]\n p = X_test.shape[0]\n\n K_n = np.array([[kernel(X_train[i], X_train[j], b) for i in range(n)] for j in range(n)])\n inv = np.linalg.inv(np.diag([sigma2] * n) + K_n)\n miu = np.zeros(p)\n Sigma = np.zeros(p)\n \n for j in range(p): # for every new point x0 in testing data.\n x0 = X_test[j]\n K_Dn = np.zeros(n) # initialize K_Dn \n for i in range(n):\n K_Dn[i] = kernel(X_train[i], x0, b) # calculate every item in K_Dn\n \n miu[j] = K_Dn.dot(inv).dot(y_train)[0] # calculate new distribution parameters\n Sigma[j] = sigma2 + kernel(x0, x0, b) - K_Dn.dot(inv).dot(K_Dn.T)\n \n return miu, Sigma", "def _sigma_2(gam, eps):\n s0 = r0**2 * alpha / (3 * eps) / mec2_unit\n\n s1_1 = 16 * (1 - eps + eps**2) * np.log(gam / eps)\n s1_2 = -1 / eps**2 + 3 / eps - 4 - 4 * eps - 8 * eps**2\n s1_3 = -2 * (1 - 2 * eps) * np.log(1 - 2 * eps)\n s1_4 = 1 / (4 * eps**3) - 1 / (2 * eps**2) + 3 / eps - 2 + 4 * eps\n s1 = s1_1 + s1_2 + s1_3 * s1_4\n\n s2_1 = 2 / eps\n s2_2 = (4 - 1 / eps + 1 / (4 * eps**2)) * np.log(2 * gam)\n s2_3 = -2 + 2 / eps - 5 / (8 * eps**2)\n s2 = s2_1 * (s2_2 + s2_3)\n\n return s0 * np.where(eps <= 0.5, s1, s2) * heaviside(gam - eps)", "def exp(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.exp())", "def compute_kernel_matrix(x,y,sigma):\n m = len(x)\n\n s = np.zeros((m,m))\n for i in range(len(x)):\n for j in range(i+1):\n s[i,j] = np.exp(-((x[i]-y[j])**2)/(2*sigma**2))\n for i in range(2,m):\n for j in range(0,i):\n s[i,j] = s[j,i]\n return s", "def eval(self, X, Y):\n sigma2 = self.sigma2\n sumx2 = torch.sum(X**2, dim=1).view(-1, 1)\n sumy2 = torch.sum(Y**2, dim=1).view(1, -1)\n D2 = sumx2 - 2.0*torch.matmul(X, Y.transpose(1, 0)) + sumy2\n K = torch.exp(-D2/(2.0*sigma2))\n return K", "def compute_dual_energy(b_i, phi_ij, psi_i, lambda_ij, gamma_ij, N):\n # Compute lambdas for all pair states (0,0),(1,0),(0,1) and (1,1)\n lmbd_tmp = numpy.zeros([N, N, 4])\n lmbd_tmp[:, :, 0] = lambda_ij[:, :, 0] + lambda_ij[:, :, 0].T\n lmbd_tmp[:, :, 1] = lambda_ij[:, :, 0] + lambda_ij[:, :, 1].T\n lmbd_tmp[:, :, 2] = lambda_ij[:, :, 1] + lambda_ij[:, :, 0].T\n lmbd_tmp[:, :, 3] = lambda_ij[:, :, 1] + lambda_ij[:, :, 1].T\n # Compute dual energy\n dual_energy = -numpy.sum(phi_ij * numpy.exp(-1. - lmbd_tmp -\n gamma_ij[:, :, numpy.newaxis]))\n dual_energy -= numpy.sum(\n psi_i * numpy.exp(-1. + (N - 1.) + numpy.sum(lambda_ij, axis=0)) *\n (b_i / psi_i) ** (N - 1))\n dual_energy -= numpy.sum(gamma_ij)\n return dual_energy", "def calculate_mse(e):\r\n return 1/2*np.mean(e**2)", "def gauss_2d(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def gauss_2d(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. 
* sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def mse(image1: np.ndarray, image2: np.ndarray) -> np.ndarray:\n return np.sqrt(np.power((image1 - image2), 2).mean(axis=(-1, -2)))", "def similarity_matrix(points, sigma):\n distances_squared = spherical_distances(points, points)**2\n\n \n return np.exp( -distances_squared / (2.0 * sigma) )", "def test_l2_metric_exp_vectorization(\n self, l2_metric_s2, times, landmarks_a, landmarks_b, landmarks_c\n ):\n landmarks_ab = l2_metric_s2.geodesic(landmarks_a, landmarks_b)\n landmarks_bc = l2_metric_s2.geodesic(landmarks_b, landmarks_c)\n landmarks_ab = landmarks_ab(times)\n landmarks_bc = landmarks_bc(times)\n\n tangent_vecs = l2_metric_s2.log(point=landmarks_bc, base_point=landmarks_ab)\n\n result = l2_metric_s2.exp(tangent_vec=tangent_vecs, base_point=landmarks_ab)\n self.assertAllClose(gs.shape(result), gs.shape(landmarks_ab))", "def matlab_style_gauss2D(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def matlab_style_gauss2D(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def TwoE(A, B, C, D, Rab2, Rcd2, Rpq2):\n return 2.0 * (np.pi ** 2.5) / ((A + B) * (C + D) * np.sqrt(A + B + C + D)) * F0(\n (A + B) * (C + D) * Rpq2 / (A + B + C + D)) * np.exp(-A * B * Rab2 / (A + B) - C * D * Rcd2 / (C + D))", "def evaluate(self, X1, X2):\r\n\r\n \"\"\"YOUR CODE HERE FOR Q1.1\"\"\"\r\n # raise NotImplementedError()\r\n n1, d = X1.shape\r\n n2, _ = X2.shape\r\n D = euclidean_dist_squared(X1, X2)\r\n return np.exp(-D / (2 * self.sigma**2))", "def psfVal(ix, iy, x, y, sigma1, sigma2, b):\n return (math.exp (-0.5*((ix - x)**2 + (iy - y)**2)/sigma1**2) +\n b*math.exp (-0.5*((ix - x)**2 + (iy - y)**2)/sigma2**2))/(1 + b)", "def matlab_style_gauss2D(shape,sigma):\n m,n = [(ss-1.)/2. 
for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def log_mean_exp(x, dim):\n return log_sum_exp(x, dim) - np.log(x.size(dim))", "def M_sigma(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating sigma_2\", file=self.logfile)\n self.sigma2 = 1/self.L_h*(np.square(np.linalg.norm(self.w)) + np.trace(self.R))", "def exp(data):\n return _make.exp(data)", "def sse(matrix,motif):\n return sum([site_error(matrix,site)**2\n for site in motif])", "def seToSE( x ):\n x = asarray(x,dtype=float)\n if x.shape != (6,):\n raise ValueError(\"shape must be (6,); got %s\" % str(x.shape))\n #\n return expM(screw(x))", "def easomfcn(x: np.ndarray) -> np.ndarray:\n\n n = x.shape[1]\n assert n == 2, \"The Easom's function is only defined on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n\n scores = (\n -np.cos(X) * np.cos(Y) * np.exp(-((X - np.pi) ** 2 + (Y - np.pi) ** 2))\n )\n return scores", "def func_exp(x, a, b, c):\n return a * np.exp(b * x) + c", "def MSE(a,b,axis):\n return ((a-b)**2).mean(axis=axis)", "def mse(sim, obs, dim=\"time\"):\n # wrap numpy function\n kwargs = dict(\n input_core_dims=[[dim], [dim]], dask=\"parallelized\", output_dtypes=[float]\n )\n mse = xr.apply_ufunc(_mse, sim, obs, **kwargs)\n mse.name = \"mse\"\n return mse", "def gaussian2d(x, y, A, sigma, x0):\n Z = A * np.exp(-( (x-x0[0])**2/(2*sigma[0]**2) + (y-x0[1])**2/(2*sigma[1]**2)))\n return Z", "def softmax(x, axis=1):\n sf = np.exp(x)\n sf = sf/np.sum(sf, axis=axis)[:,np.newaxis]\n return sf", "def softmax(x):\n if type(x) == list:\n dim=len(x)\n norm = np.sum(np.exp(x))\n for idx in range(dim):\n x[idx] = np.exp(x[idx])/norm\n elif type(x) == np.ndarray:\n dim=x.shape\n for col in range(dim[1]):\n norm = np.sum(np.exp(x[:, col]))\n for idx in range(dim[0]):\n x[idx, col] = np.exp(x[idx, col])/norm\n else:\n raise Exception('incorrect input')\n return x", "def exp(q):\n normv = amplitude(q[:,:3])\n res = np.zeros_like(q)\n res[:,3:] = np.exp(q[:,3:]) * np.cos(normv)\n res[:,:3] = np.exp(q[:,3:]) * q[:,:3] / normv \n res[:,:3] *= np.sin(normv)\n return res", "def compute_RMSE(e):\n \"\"\"Corresponds to sqrt(2*MSE)\"\"\"\n \n return np.sqrt(2*compute_MSE(e))", "def _mse2(self, trace, **inputs):\n exp = np.dot(inputs['gwas_gen'],\n trace['beta_med'].mean(axis=0).T)\n phen_pred = exp * trace['alpha'].mean()\n mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n return mse", "def perform_sigm_times_exp(tree, exp_x=None, exp_minus_x=None, sigm_x=None,\r\n sigm_minus_x=None, parent=None, child_idx=None,\r\n full_tree=None):\r\n\r\n if exp_x is None:\r\n exp_x = []\r\n if exp_minus_x is None:\r\n exp_minus_x = []\r\n if sigm_x is None:\r\n sigm_x = []\r\n if sigm_minus_x is None:\r\n sigm_minus_x = []\r\n if full_tree is None:\r\n full_tree = tree\r\n if False: # Debug code.\r\n print '<perform_sigm_times_exp>'\r\n print ' full_tree = %s' % full_tree\r\n print ' tree = %s' % tree\r\n print ' exp_x = %s' % exp_x\r\n print ' exp_minus_x = %s' % exp_minus_x\r\n print ' sigm_x = %s' % sigm_x\r\n print ' sigm_minus_x= %s' % sigm_minus_x\r\n neg, inputs = tree\r\n if isinstance(inputs, list):\r\n # Recurse through inputs of the multiplication.\r\n rval = False\r\n for sub_idx, sub_tree in enumerate(inputs):\r\n rval |= perform_sigm_times_exp(\r\n tree=sub_tree, parent=tree, child_idx=sub_idx,\r\n exp_x=exp_x, exp_minus_x=exp_minus_x, sigm_x=sigm_x,\r\n sigm_minus_x=sigm_minus_x, 
full_tree=full_tree)\r\n return rval\r\n else:\r\n # Reached a leaf: if it is an exponential or a sigmoid, then we\r\n # first attempt to find a match in leaves already visited.\r\n # If there is such a match, we modify the already-visited leaf\r\n # accordingly: for instance if we visited a leaf sigmoid(x), then\r\n # find later a -exp(-x), we replace the previous leaf by\r\n # -sigmoid(-x) and remove the -exp(-x) from the tree.\r\n # If no match is found, then we register this leaf so that it can\r\n # be found later while walking the tree.\r\n var = inputs\r\n keep_it = False\r\n exp_info = is_exp(var)\r\n if exp_info is not None:\r\n exp_neg, exp_arg = exp_info\r\n neg ^= exp_neg\r\n neg_arg = is_neg(exp_arg)\r\n if neg_arg is None:\r\n if not replace_leaf(exp_arg, sigm_minus_x, sigm_x,\r\n sigmoid, neg):\r\n exp_x.append((exp_arg, tree))\r\n keep_it = True\r\n else:\r\n if not replace_leaf(neg_arg, sigm_x, sigm_minus_x,\r\n lambda x: sigmoid(-x), neg):\r\n exp_minus_x.append((neg_arg, tree))\r\n keep_it = True\r\n elif var.owner and var.owner.op == sigmoid:\r\n sigm_arg = var.owner.inputs[0]\r\n neg_arg = is_neg(sigm_arg)\r\n if neg_arg is None:\r\n if not replace_leaf(sigm_arg, exp_minus_x, sigm_minus_x,\r\n lambda x: sigmoid(-x), neg):\r\n sigm_x.append((sigm_arg, tree))\r\n keep_it = True\r\n else:\r\n if not replace_leaf(neg_arg, exp_x, sigm_x, sigmoid, neg):\r\n sigm_minus_x.append((neg_arg, tree))\r\n keep_it = True\r\n else:\r\n # It is not an exponential nor a sigmoid.\r\n keep_it = True\r\n if not keep_it:\r\n # Delete this leaf, i.e. replace it by [False, None] (corresponding\r\n # to a multiplication by 1).\r\n assert parent is not None\r\n parent[1][child_idx] = [False, None]\r\n return not keep_it", "def dim_pow(dims, exp):\n return (\n dims[0] * exp,\n dims[1] * exp,\n dims[2] * exp,\n dims[3] * exp,\n dims[4] * exp,\n dims[5] * exp,\n dims[6] * exp,\n )", "def exp_mean_dense(x):\n # convert out of compressed sparse matrix\n return np.log((np.sum(np.exp(x)-1)/x.shape[1]) + 1)", "def compute_e(f_mat, m_mat):\r\n return m_mat.T @ f_mat @ m_mat", "def mse(A, B):\n return ((A - B) ** 2).mean(axis=0)", "def exp(x):\n raise NotImplementedError", "def expgaussian(mu, wid, timeconstant, x): \n # Gaussian signal broadened by an exponetial signal\n g = gaussian(mu, wid, x)\n \n hly = np.round( len(g) / 2.0 )\n ey = np.r_[np.zeros(hly),g,np.zeros(hly)]\n fy = np.fft.fft(ey)\n a = np.exp(-(np.arange(len(fy))) / timeconstant )\n fa = np.fft.fft(a)\n fy1 = fy * fa\n ybz = np.real(np.fft.ifft(fy1)) / np.sum(a)\n yb = ybz[hly:len(ybz)-hly]\n \n return yb", "def sse_optimized(matrix,motif):\n #Hoisted computation of K out of site_error\n K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])\n for b in \"ACGT\"]))\n for i in range(L)])\n return sum([(site_error_optimized(matrix,site)+K)**2\n for site in motif])", "def exp(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.exp(), diag_shape=self.diag_shape)", "def fspecial_gaussian(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. 
* sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def exp(X):\n X = np.maximum(X,100)\n return np.exp(X)", "def frobenius_inner_prod(mat1, mat2):\n assert mat1.shape==mat2.shape\n # assert isinstance(mat1, Variable) and isinstance(mat2, Variable))\n f = mat1.mul(mat2).sum()\n return f", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def mse(x1, x2, axis=0):\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean((x1 - x2) ** 2, axis=axis)", "def exp(t,tau):\n return np.exp(-t/tau)", "def MSE(actual, noisy):\n mean_squared_error(actual, noisy)", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def exp(module, x):\n _import_modules()\n if module in [np, ma]:\n return np.exp(x)\n elif module == torch:\n return torch.exp(x)\n elif module == jnp:\n return jnp.exp(x)\n elif module == tf:\n return tf.math.exp(x)\n raise UnknownModuleException(f\"Module {module.__name__} not supported.\")", "def softmax(x):\n sf = np.exp(x)\n sf = sf / np.sum(sf, axis=0)\n return sf", "def softmax(x):\n sf = np.exp(x)\n sf = sf / np.sum(sf, axis=0)\n return sf", "def exp_fun(self, xs, *args, **kwargs):\n raise NotImplementedError", "def compute_sigma_star_2(self, shape, array, j):\n array_lag = np.roll(array[:,j], 1)\n array_lag[0] = array[0,j]\n scale = (0.01 + sum((array[:,j] - array_lag)**2))**(-1)\n return invgamma.rvs(a = shape, scale = scale, size = 1)[0]", "def model_2exp(a1, a2, t1, t2, s,**kwargs):\n # auxilary function taking a time array as argument.\n def aux(array_t, details=False):\n exp1 = model_1exp(a1, t1, s, **kwargs)(array_t)\n exp2 = model_1exp(a2, t2, s, **kwargs)(array_t)\n exp_tot = exp1+exp2\n\n # can return exponential components\n if details == True:\n return exp_tot, exp1, exp2\n elif details == False:\n return exp_tot\n\n return aux", "def _sigma_ep(self,gam,eps):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return self._sigma_1(gam,eps)", "def test_local_sigm_times_exp(self):\r\n def match(func, ops):\r\n #print [node.op.scalar_op for node in func.maker.fgraph.toposort()]\r\n assert [node.op for node in func.maker.fgraph.toposort()] == ops\r\n m = self.get_mode(excluding=['local_elemwise_fusion', 'inplace'])\r\n x, y = tensor.vectors('x', 'y')\r\n\r\n f = theano.function([x], sigmoid(-x) * tensor.exp(x), mode=m)\r\n match(f, [sigmoid])\r\n\r\n f = theano.function([x], sigmoid(x) * tensor.exp(-x), mode=m)\r\n match(f, [tensor.neg, sigmoid])\r\n\r\n f = theano.function([x], -(-(-(sigmoid(x)))) * tensor.exp(-x), mode=m)\r\n match(f, [tensor.neg, sigmoid, tensor.neg])\r\n\r\n f = theano.function(\r\n [x, y],\r\n (sigmoid(x) * sigmoid(-y) * -tensor.exp(-x) *\r\n tensor.exp(x * y) * tensor.exp(y)),\r\n 
mode=m)\r\n match(f, [sigmoid, tensor.mul, tensor.neg, tensor.exp, sigmoid,\r\n tensor.mul])", "def rmse_matrices(A: np.ndarray, B: np.ndarray, element_wise: bool = False) -> np.ndarray:\n A = np.copy(A)\n B = np.copy(B)\n if A.shape != B.shape:\n raise ValueError(\"Both arrays must have the same shape.\")\n if A.ndim == 2:\n return np.sqrt(np.mean((A - B)**2))\n if element_wise:\n return np.sqrt(np.mean((A - B)**2, axis=0))\n return np.sqrt(np.mean(np.mean((A - B)**2, axis=2), axis=1))", "def logsumexp2(z):\n return logsumexp(z, axis=2)", "def ema(matrix, alpha):\n\n # declare empty EMA numpy array\n e = np.zeros(matrix.shape[0])\n\n # set the value of the first element in the EMA array\n e[0] = matrix[0]\n\n # use the EMA formula to calculate the value of each point in the EMA array\n for t in range(1, matrix.shape[0]):\n e[t] = alpha*matrix[t] + (1 - alpha)*e[t - 1]\n\n return e", "def ema(matrix, alpha):\n\n # declare empty EMA numpy array\n e = np.zeros(matrix.shape[0])\n\n # set the value of the first element in the EMA array\n e[0] = matrix[0]\n\n # use the EMA formula to calculate the value of each point in the EMA array\n for t in range(1, matrix.shape[0]):\n e[t] = alpha*matrix[t] + (1 - alpha)*e[t - 1]\n\n return e", "def doubleexp2(params, t):\n # 2011-10-27 15:50 IJMC: Created\n\n if len(params)==3:\n return (1. - params[0] * exp(-t/params[1])) * exp(-t/params[2])\n else:\n return (1. - params[0] * exp(-t/params[1])) * exp(-t/params[2]) * params[3]", "def get_matrix_of_eigs(w: np.ndarray) -> np.ndarray:\n transform_eigs = np.zeros((w.shape[0], w.shape[0]),\n dtype=np.complex128)\n for i, j in product(range(w.shape[0]), repeat=2):\n if np.isclose(abs(w[i] - w[j]), 0):\n transform_eigs[i, j] = 1\n else:\n transform_eigs[i, j] = (np.exp(1j * (w[i] - w[j])) - 1) / (\n 1j * (w[i] - w[j]))\n return transform_eigs", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def test_exp():\n x = np.linspace(-3,3,13)\n\n default_use_numexpr = accel_math._USE_NUMEXPR\n\n accel_math._USE_NUMEXPR = True\n r1 = accel_math._exp(x)\n\n accel_math._USE_NUMEXPR = False\n r2 = accel_math._exp(x)\n\n np.testing.assert_almost_equal(r1,r2)\n\n accel_math._USE_NUMEXPR = default_use_numexpr", "def logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor:\n mat1_shape = mat1.size()\n mat2_shape = mat2.size()\n mat1 = mat1.contiguous().view(-1, mat1_shape[-1])\n mat2 = move_dim(mat2, 0, -1)\n mat2 = mat2.contiguous().view(-1, mat2_shape[0])\n\n if use_mm:\n mat1_max = mat1.max(dim=-1, keepdim=True)[0]\n mat2_max = mat2.max(dim=-1, keepdim=True)[0]\n mat1 = mat1 - mat1_max\n mat2 = mat2 - mat2_max\n\n out = _safe_log(torch.matmul(mat1.exp(), mat2.exp().t()))\n out = out + mat1_max + mat2_max.t()\n else:\n out_sum = mat1.unsqueeze(1) + mat2.unsqueeze(0)\n out = logsumexp(out_sum, dim=-1)\n\n return out.view(concat_shape(mat1_shape[:-1], mat2_shape[1:]))", "def test_exp_vectorization(self, space, point, tangent_vecs):\n space.equip_with_metric(self.Metric)\n end_points = space.metric.exp(\n tangent_vec=tangent_vecs,\n base_point=point,\n )\n result = end_points.shape\n expected = (tangent_vecs.shape[0], 2)\n self.assertAllClose(result, expected)", "def test_exp_square():\n\timport odelab.scheme.exponential as E\n\tfor name in dir(E):\n\t\tcls = getattr(E, name)\n\t\tif hasattr(cls, 'general_linear_z'):\n\t\t\tobj = cls()\n\t\t\ta,b = obj.general_linear_z(np.eye(2))\n\t\t\tnb_stages = len(a)\n\t\t\ttail_length = 
obj.tail_length\n\t\t\tyield CheckSquare(name),name, a,b, nb_stages, tail_length", "def exponentiate_and_normalize(values, dim=0):\n\n return torch.exp(lognormexp(values, dim=dim))", "def exponentiate_and_normalize(values, dim=0):\n\n return torch.exp(lognormexp(values, dim=dim))", "def MVgaussian(size,mu1=0,mu2=0, sigma1=3,sigma2 = 1):\n kernel = np.zeros((size, size), dtype=np.float32)\n \n size = int(size) // 2\n X = np.arange(-size,size+1)\n Y = np.arange(-size,size+1)\n \n for x in X:\n for y in Y:\n Gx = np.exp(-((x-mu1)**2)/(2*(sigma1**2)))\n Gy = np.exp(-((y-mu2)**2)/(2*(sigma2**2)))\n Gx = math.exp(-(math.pow(x-mu1,2))/(2*math.pow(sigma1,2)))\n Gy = math.exp(-(math.pow(y-mu2,2))/(2*math.pow(sigma2,2)))\n kernel[x+size,y+size] = Gx*Gy\n return kernel", "def spher_harm_exp_v2(V,Yj,scale):\n # Convert the 3D DC potential into 1D array.\n # Numerically invert, here the actual expansion takes place and we obtain the expansion coefficients M_{j}.\n sz = np.array(V).shape\n npts = sz[0]*sz[1]*sz[2]\n W=np.reshape(V,npts) # 1D array of all potential values\n W=np.array([W]).T # make into column array\n Mj=np.linalg.lstsq(Yj,W)\n Mj=Mj[0] # array of coefficients\n Order = np.sqrt(len(Mj))-1\n # rescale to original units\n i = 0\n Order = int(np.sqrt(len(Mj))-1)\n for n in range(1,Order+1):\n for m in range(1,2*n+1):\n i += 1\n Mj[i] = Mj[i]/(scale**n)\n return Mj", "def _sigmoid_m(self, X):\n result = np.zeros((X.shape[0], X.shape[1]), dtype='float32')\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n result[i, j] = 1.0 / (1.0 + np.exp(-X[i, j]))\n return result", "def np_elementwise(mat1, mat2):\n return (mat1 + mat2, mat1-mat2, mat1 * mat2, mat1/mat2)", "def exp(self):\n return Factor().__build( VarSet(self.v) , np.exp(self.t) )", "def multivariateGaussian(X, mu, Sigma2):\n k = mu.shape[0]\n\n if Sigma2.shape[1] == 1 or Sigma2.shape[0] == 1:\n Sigma2 = np.diag(Sigma2[:, 0])\n\n X = (X-mu.T).copy()\n p = (2*np.pi)**(-k/2)*np.linalg.det(Sigma2)**-0.5\n p = p*np.exp(-0.5*(X.dot(np.linalg.pinv(Sigma2))*X).sum(1, keepdims=True))\n return p", "def exp2_inplace(a):", "def run_numpy(self):\n return np.linalg.eigh(self.mat)", "def softmax(x): \n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H" ]
[ "0.60777706", "0.6070397", "0.6037772", "0.60048586", "0.59793174", "0.58501387", "0.58492404", "0.58091897", "0.57963014", "0.57812047", "0.57540333", "0.57262975", "0.57174045", "0.5708458", "0.5661374", "0.5589162", "0.55674833", "0.5548229", "0.5532804", "0.5519702", "0.54916966", "0.54835385", "0.5474528", "0.54680383", "0.5456158", "0.5456158", "0.5452313", "0.5418749", "0.5401341", "0.5395118", "0.53858733", "0.53858733", "0.5362595", "0.5361635", "0.53553575", "0.5350479", "0.5348321", "0.5338657", "0.5320004", "0.53186786", "0.531603", "0.5303144", "0.5300526", "0.52781266", "0.5272391", "0.5269842", "0.52651966", "0.52643055", "0.52437574", "0.52425915", "0.52404386", "0.5231633", "0.5224332", "0.52087235", "0.5203727", "0.51982546", "0.51969475", "0.5196199", "0.5192377", "0.51873386", "0.51837546", "0.5182736", "0.51772773", "0.51747644", "0.5172783", "0.51697797", "0.51679283", "0.5167631", "0.5153145", "0.5150834", "0.5140671", "0.5140671", "0.5135554", "0.5118057", "0.5108687", "0.5106982", "0.50947094", "0.5091585", "0.50888526", "0.50856847", "0.50856847", "0.5081475", "0.50761914", "0.5068948", "0.50680614", "0.5064137", "0.5055759", "0.50543016", "0.5050767", "0.5050767", "0.50507647", "0.50478894", "0.5047238", "0.5041728", "0.50408405", "0.50406796", "0.50393873", "0.5034815", "0.5033738", "0.5030269" ]
0.7608249
0
Each row of the matrix, let's say the jth row, represents the distances from the other data points to the jth point. This function returns the indexes of the points with the smallest distances with respect to each point represented by that row. By row, I mean the 0th dimension. Also notice that this function does not include the target particle itself, i.e. the diagonal element of the matrix is set to zero.
def nearest_points_indexes_without_self(matrix, num_to_keep):
    # Set the diagonal to 0
    np.fill_diagonal(matrix, 0)
    # Get the position for the resulted values
    sort_arg = np.argsort(matrix, axis=1)
    return sort_arg[:, : num_to_keep]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j", "def nearest_points_indexes_with_self(matrix, num_to_keep):\n\n # Set the diagonal to 1\n np.fill_diagonal(matrix, 1)\n # Get the position for the resulted values\n sort_arg = np.argsort(matrix, axis=1)\n\n return sort_arg[:, : num_to_keep]", "def find_min_distance():\n return np.argmin(d)", "def smallest_distance(self, clusters):\n i, j = numpy.unravel_index(numpy.argmin(clusters), clusters.shape)\n return clusters[i, j], i, j", "def nearest_points_values_with_self(matrix, num_to_keep):\n\n # Set the diagonal to 1\n np.fill_diagonal(matrix, 1)\n # Get the position for the resulted values\n sort = np.sort(matrix, axis=1)\n\n return sort[:, : num_to_keep]", "def nearest_neighbour(matrix, start=0):\n path = [start]\n while len(matrix) != len(path):\n matrix[:, start] = numpy.inf\n start = numpy.argmin(matrix[start])\n path.append(start)\n return path", "def nearest_cluster(X,c):\n K = np.size(c,0)\n idx = np.zeros((np.size(X,0),1))\n arr = np.empty((np.size(X,0),1))\n for i in range(0,K):\n y = c[i]\n temp = np.ones((np.size(X,0),1))*y\n b = np.power(np.subtract(X,temp),2)\n a = np.sum(b,axis = 1)\n a.resize((np.size(X,0),1))\n arr = np.append(arr, a, axis=1)\n arr = np.delete(arr,0,axis=1)\n idx = np.argmin(arr, axis=1)\n return idx", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def _find_min_pair(pandas_matrix):\n numpy_matrix = pandas_matrix.values\n mins = np.where(numpy_matrix == np.nanmin(numpy_matrix))\n min_col_idx = mins[0][0]\n min_row_idx = mins[1][0]\n (min_col, min_row) = (pandas_matrix.index[min_col_idx], \n pandas_matrix.columns[min_row_idx])\n\n return (min_col, min_row)", "def find_closest_index(traj, point):\n\n\t#TODO: vectorise function to receive any length of points.\n\n\tdistances = np.subtract(np.array(point),traj) \n\tdistances = distances.reshape(-1,2)\n\t#distances = distances[~np.isnan(distances)].reshape(-1,2)\n\n\t#print(\"distances\")\n\t#pprint(distances)\n\tdist_array = np.linalg.norm(distances, axis = 1)\n\t#pprint(dist_array)\n\t#dist_array = np.sqrt((distances[:,0]**2)+(distances[:,1]**2)) #array of distances from trajectory to gaze landing point in world. \n\tidx = np.nanargmin(abs(dist_array)) #find smallest difference in pythag distance from 0,0 to get closest point. 
\n\tdists = distances[idx, :]\n\tdist = dist_array[idx]\n\n\treturn idx#, dists, dist\n\t#return idx", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def nearest_points_values_without_self(matrix, num_to_keep):\n\n # Set the diagonal to 0\n np.fill_diagonal(matrix, 0)\n # Get the position for the resulted values\n sort = np.sort(matrix, axis=1)\n\n return sort[:, : num_to_keep]", "def find_closest(distances, threshold):\n n = len(distances)\n person_1 = []\n person_2 = []\n d = []\n\n for i in range(n):\n for j in range(i+1, n):\n if distances[i][j] <= threshold:\n person_1.append(i)\n person_2.append(j)\n d.append(distances[i][j])\n\n return person_1, person_2, d", "def getRowHeuristics(matrix):\n row, col = matrix.shape\n rHeuristic = np.zeros((row,2)) # Dos columnas. La primera para indicar la columna la segunda para la Heuristica\n for i in range(0,row):\n rHeuristic[i,0] = int(i)\n #print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))\n rHeuristic[i,1] = 1/sum(matrix[i,:])\n return rHeuristic[rHeuristic[:,1].argsort()]", "def nearestNeighbor(self, coords, my_index, blacklist):\n min_index, min_distance = 0, 999999999\n my_col, my_row = coords[my_index]\n for i, (col, row) in enumerate(coords):\n if i in blacklist:\n continue\n distance = math.sqrt((my_col-col)**2 + (my_row-row)**2)\n if distance < min_distance:\n min_index, min_distance = i, distance\n return min_index", "def _nearest_cluster_distance(distances_row, labels, i):\n label = labels[i]\n b = np.min([np.mean(distances_row[labels == cur_label])\n for cur_label in set(labels) if not cur_label == label])\n return b", "def find_closest_trajectory_pose(self):\n np_state = numpy.array([[self.x], [self.y]])\n temp_distance = numpy.sum(\n (self.np_trajectory[0:2, :] - np_state) ** 2,\n axis=0)\n best_idx = numpy.argmin(temp_distance)\n return best_idx", "def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists", "def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()", "def _first_index_with_smaller_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] <= P[i]:\n i -= 1\n return i", "def saddle_points(matrix):\n if not all(len(row) == len(matrix[0]) for row in matrix[1:]):\n raise ValueError('Provided matrix is irregular.')\n columns = [col for col in zip(*matrix)]\n points = set()\n for ridx, row in enumerate(matrix):\n for cidx, 
element in enumerate(row):\n if element == max(row) and element == min(columns[cidx]):\n points.add((ridx, cidx))\n return points", "def _compute_euclidean_neigh_matrix(src, d_matrix, radius):\n\n n_max = 100\n n_min = 3\n reached_points = np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")", "def min_distance_vertex(distance, visited):\n vertices = len(visited)\n min_distance = INF\n min_index = None\n for v in range(vertices):\n if not visited[v] and distance[v] <= min_distance:\n min_distance = distance[v]\n min_index = v\n return min_index", "def shortestDistance(self, grid):\n # return self.house_oriented_TLE(grid)\n # One axis\n row_count = [sum(row) for row in grid]\n col_count = [0]* len(grid[0])\n row_dist = [0]* len(grid)\n col_dist = [0]* len(grid[0])\n output = sys.maxsize\n for i in range(len(grid)): \n for j in range(len(grid[0])):\n col_count[j] += grid[i][j]\n \n for index_p in range(len(row_count)):\n for index_h in range(len(row_count)):\n row_dist[index_p] += abs(index_h - index_p) * row_count[index_h]\n \n for index_p in range(len(col_count)):\n for index_h in range(len(col_count)):\n col_dist[index_p] += abs(index_h - index_p) * col_count[index_h]\n \n # print(row_count)\n # print(col_count)\n # print(row_dist)\n # print(col_dist)\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n continue\n output = min(output, row_dist[i] + col_dist[j])\n return output", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def find_min_point(points):\r\n 
smallest_point_index = 0\r\n for i in range(1, len(points)):\r\n if points[i][1] < points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n elif points[i][0] > points[smallest_point_index][0] and points[i][1] == points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n return smallest_point_index", "def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def _findMin(p, A):\n\n m=(-1, (0,0))\n for p0 in A:\n dist = np.linalg.norm(p0-np.array(p))\n if m[0]==-1 or m[0]>dist:\n m = (dist, p0)\n \n return tuple(m[1])", "def get_idx(lons, lats, lon, lat):\n dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5\n return np.unravel_index(dist.argmin(), dist.shape)", "def get_min_dist(x0, y0, arr):\n dist = np.hypot(arr.T[0] - x0, arr.T[1] - y0)\n min_dist = np.min(dist)\n val = np.argmin(dist)\n return min_dist, arr[val]", "def dist(dm, sm, neighbors):\n\n # Initialize list of possible distances\n distances = []\n\n # loop over all neighbors of the cell\n for neighbor in neighbors:\n # If the neighbor is valid\n if dm[neighbor[0], neighbor[1]] != -1:\n # add neighbor distance + 1 to possible distances\n distances.append(dm[neighbor[0], neighbor[1]] + 1)\n\n # return minimal distance\n return np.min(distances)", "def nodes_min_energy_index(self, node):\n idx = -1\n curr_energy = np.inf\n for i in range(self.cost_matrix.shape[1]):\n new_energy = self.cost_matrix[node][i]\n if new_energy < curr_energy:\n curr_energy = new_energy\n idx = i\n return idx", "def pair_idx(rows, comm=None):\n raise Exception(\"Not implemented\")\n \n if comm == None:\n comm = MPI.COMM_WORLD\n \n total = comb(rows,2,exact=True)\n size = comm.Get_size()\n \n size = 1000\n \n print(total / size)\n \n target = total / size\n \n current_row = 0\n calc_list = []\n row_list = [[] for x in range(size)]\n for rank in range(size):\n row_list[rank].append(current_row)\n \n current_calcs = 0\n \n for value in range(current_row, rows):\n current_calcs += value\n if current_calcs > target:\n if rank == size-1:\n pass\n else:\n break\n \n calc_list.append(current_calcs)\n row_list[rank].append(value)\n current_row = value\n \n return row_list,calc_list", "def get_maxmin_index_from_row(\n distance_matrix: np.ndarray,\n row: int,\n previous_indexes: List,\n type: str,\n )-> int:\n distance_matrix = distance_matrix.copy()\n arr = distance_matrix[row].astype(float)\n \n aux_list = range(arr.shape[0])\n aux_list_2 = []\n for i in aux_list:\n if i in previous_indexes:\n aux_list_2.append(True)\n else:\n aux_list_2.append(False)\n previous_indexes_bool = aux_list_2\n \n if type == 'max':\n arr[previous_indexes_bool] = -1\n target_index = np.argmax(arr)\n if type == 'min':\n arr[previous_indexes_bool] = np.Inf\n target_index = np.argmin(arr)\n \n return target_index", "def get_nearest_atom_inds_per_mol(self):\n self.closest_at_per_mol = np.zeros((self.nmol,\n self.at_per_mol,\n self.at_per_mol-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.at_per_mol)\n for imol in range(self.nmol):\n for iat in range(self.at_per_mol):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist_per_mol[imol, iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_at_per_mol[imol, iat] = at_inds", "def distance_matrix(n_row, n_col):\n\n n_pop = int(n_row * n_col)\n center = int(n_row/2*(n_col+1))\n\n pop_idx = np.arange(n_pop)\n pop_idx_col = np.remainder(pop_idx, n_col)\n pop_idx_row = pop_idx // n_row\n\n pos = 
np.vstack((pop_idx_col,pop_idx_row)).T\n distance = spa.distance.cdist([pos[center]], pos)[0]\n\n return distance", "def find_min(self, A, w):\n import numpy as np\n\n vcost = self.INFINITY\n vto = vfrom = -1\n for v in w:\n # Get array offset of minimum of this vertex\n i = np.argmin(A[v,:])\n if A[v,i] < vcost:\n vcost = A[v,i]\n vto = i\n vfrom = v\n return (vfrom, vto, vcost)", "def find_nearest_neighbours_SURF(averageDistance,inst,distanceArray,maxInst): \r\n NN=[]\r\n min_indices=[] \r\n\r\n for j in range(maxInst):\r\n if inst != j:\r\n locator = [inst,j]\r\n locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy)\r\n d = distanceArray[locator[0]][locator[1]]\r\n if d<averageDistance:\r\n min_indices.append(j)\r\n \r\n for j in range(len(min_indices)):\r\n NN.append(min_indices[j])\r\n \r\n return NN", "def min(weightData , dataSetVector ):\r\n # weightData: pass the whole weightData array.\r\n # dataSetVector: pass the a data vector to compare with weightdata array, to find its closest match\r\n winnerIndex = 0 #flag for initalizing the winner index\r\n minValue = EcuDist(dataSetVector,weightData[0]) # initalize the minValue\r\n # iterate through all weighdata rows to find the closest match, depending on ecu. distance,\r\n #and then return the index of the closest match(winner)\r\n for i in range(weightData.shape[0]):\r\n if(EcuDist(dataSetVector,weightData[i]) < minValue):\r\n minValue = EcuDist(dataSetVector,weightData[i])\r\n winnerIndex = i\r\n return winnerIndex", "def get_nearest_atom_inds(self):\n # Create empty data structure\n self.closest_ats = np.zeros((self.natom, self.natom-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.natom)\n for iat in range(self.natom):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist[iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_ats[iat] = at_inds", "def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]", "def update_distmatrix(min_idx, dist_matrix):\n i, j = min_idx\n new_cluster_dist = np.minimum(dist_matrix[i, :], dist_matrix[j, :])\n new_cluster_dist[i] = np.inf\n \n dist_matrix[i, :] = new_cluster_dist\n dist_matrix[:, i] = new_cluster_dist\n \n dist_matrix[j, :] = np.inf\n dist_matrix[:, j] = np.inf\n \n return dist_matrix", "def getNeighbors(training_data, test_row, k):\n\n distances = list()\n for training_row in training_data:\n dist = euclidianDistance(training_row, test_row)\n distances.append([training_row, dist])\n \n #Sort on the basis of dist\n distances.sort(key=lambda row:row[1])\n\n neighbors = list()\n\n for i in range(int(k)):\n neighbors.append(distances[i][0])\n\n return neighbors", "def get_nearest_node_index(node_list, random_node):\n\n dist_list = [\n (node.x - random_node.x) ** 2 + (node.y - random_node.y) ** 2\n for node in node_list\n ]\n minind = dist_list.index(min(dist_list))\n\n return minind", "def findSmallest(distancesWithNames):\n smallest = distancesWithNames[0][2]\n smallestIndex = -1\n for i in range(len(distancesWithNames)):\n if smallest >= distancesWithNames[i][2]:\n smallest = distancesWithNames[i][2]\n smallestIndex = i\n return smallestIndex", "def closestCluster(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n 
closest = distance\n bestIndex = i\n\treturn bestIndex", "def get_min_shannon_entropy(grid):\r\n curr_min = math.inf\r\n curr_best = []\r\n for i in range(len(grid[0])):\r\n for j in range(len(grid)):\r\n if not grid[j][i].collapsed:\r\n w = grid[j][i].block_weights\r\n shannon_entropy = sum([-math.log(el) for el in w] )\r\n if shannon_entropy < curr_min:\r\n curr_min = shannon_entropy\r\n curr_best = [(i,j)]\r\n elif shannon_entropy == curr_min:\r\n curr_best.append((i,j))\r\n idx = np.random.choice(range(len(curr_best))) #choose randomly if theres a tie\r\n return curr_best[idx] #x,y\r", "def detec_mfo_dist(betas):\n k = len(betas)\n min_dist = np.inf\n for i in range(k-1):\n for j in range(i+1,k):\n distance = np.sum((betas[i,:] - betas[j,:])**2)\n if distance < min_dist:\n MFO_index = [i,j]\n min_dist = distance\n return MFO_index", "def smallest_cert(pts):\n # 1. project the points\n def projected_flattened_pt(pt):\n mag = distance(pt)\n distance_wanted = rect_dist\n return distance_wanted/mag*pt[1], \\\n distance_wanted/mag*pt[2]\n\n flat_pts = np.array([[projected_flattened_pt(pt), pt] for pt in pts])\n\n # 2. identify rows of data\n data = {} \n # maps height (from ground) to the \"row\" (scan) list of\n # horizontal-only (1D) data\n for pt, origpt in flat_pts:\n for row_height in data:\n if abs(pt[1]-row_height) < dist_bt_rows/2:\n data[row_height].append([pt[0], origpt])\n break\n else:\n data[pt[1]] = [[pt[0], origpt]]\n\n #3. build up final_data so each row has enough point x-density\n def get_left_i(pts, left_edge):\n # get last index <= left_edge\n for i, pt in enumerate(pts):\n if pt[0] > left_edge:\n return i - 1\n\n def get_right_i(pts, right_edge):\n # get first index >= right_edge\n i = len(pts)-1\n while True:\n pt = pts[i]\n if pt[0] < right_edge:\n return i + 1\n i -= 1\n\n final_data = []\n for _, subdata in data.items():\n subdata = sorted(subdata, key=lambda t: t[0])\n i = get_left_i(subdata, lane_left)\n end = get_right_i(subdata, lane_right)\n\n final_data = [subdata[i][1]]\n while i < end:\n next_i = get_left_i(subdata[i:], \n subdata[i][0]+max_xpt_separation)\n if not next_i:\n return False\n i += next_i\n final_data.append(subdata[i][1])\n\n return final_data", "def _lowestDistanceToCluster(self, clusters: ndarray, sequenceIdx: int) -> Tuple[int, float]:\n lowestClusterIdx = -1\n lowestDistance = np.inf\n for cIdx in range(self.numClusters):\n distance = self._distanceToCluster(clusters[cIdx], sequenceIdx)\n if distance < lowestDistance:\n lowestClusterIdx = cIdx\n lowestDistance = distance\n return lowestClusterIdx, lowestDistance", "def eeg_findnearest(x,X):\t\n\t#x array or vector and X a scalar\n\tabsdif = np.abs(x-X)\n\tval = np.min(absdif)\n\tidx = absdif.argmin()\n\treturn val,idx", "def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx", "def _kth_nearest_neighbor_dist(\n distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix], k\n) -> np.ndarray:\n\n if not is_integer(k):\n raise ValueError(f\"parameter 'k={k}' must be a positive integer\")\n else:\n # make sure we deal with Python built-in\n k = int(k)\n\n if not (0 <= k <= distance_matrix.shape[1]):\n raise ValueError(\n \"'k' must be an integer between 1 and \"\n 
f\"distance_matrix.shape[1]={distance_matrix.shape[1]}\"\n )\n\n if isinstance(distance_matrix, np.ndarray):\n dist_knn = np.partition(distance_matrix, k - 1, axis=1)[:, k - 1]\n elif isinstance(distance_matrix, scipy.sparse.csr_matrix):\n # see mircobenchmark_kth_nn.py for a comparison of implementations for the\n # sparse case\n\n def _get_kth_largest_elements_sparse(\n data: np.ndarray,\n indptr: np.ndarray,\n row_nnz,\n k_neighbor: int,\n ):\n dist_knn = np.zeros(len(row_nnz))\n for i in range(len(row_nnz)):\n start_row = indptr[i]\n dist_knn[i] = np.partition(\n data[start_row : start_row + row_nnz[i]], k_neighbor - 1\n )[k_neighbor - 1]\n\n return dist_knn\n\n row_nnz = distance_matrix.getnnz(axis=1)\n\n if (row_nnz < k).any():\n raise ValueError(\n f\"There are {(row_nnz < k).sum()} points that \"\n f\"do not have at least k_neighbor={k}.\"\n )\n\n dist_knn = _get_kth_largest_elements_sparse(\n distance_matrix.data,\n distance_matrix.indptr,\n row_nnz,\n k,\n )\n else:\n raise TypeError(f\"type {type(distance_matrix)} not supported\")\n\n return dist_knn", "def closestCentroids(self, points , centroids ):\n dists = scipy.spatial.distance.cdist(points,centroids)\n # 1 is dimension\n minIds = numpy.argmin(dists, 1)\n return minIds", "def closestClusterAndDistance(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n return (bestIndex, closest)", "def get_nearest_indices(vector, matrix, num=5):\n if len(matrix) < num:\n num = len(matrix)\n\n m, n = matrix.shape\n diff_matrix = np.tile(vector, (m, 1)) - matrix\n diff_matrix = abs(diff_matrix)\n\n distance = diff_matrix.sum(axis=1)\n sortIndices = np.argsort(distance)\n sortIndices = sortIndices[0:num]\n\n return sortIndices[random.randint(0, num - 1)]", "def exhaustive_search(X, z):\n #initialize shortest path and distance to the first row\n d_star = la.norm(z - X[0])\n x_star = X[0]\n #check the distance from each row of X and if shortest thus far, save it\n for i in range(X.shape[0]):\n #calculate distance from current row to target z\n x = X[i,:]\n cur_dist = la.norm(x - z)\n #if the distance is smallest thus far, save the row and distance\n if cur_dist < d_star:\n x_star = x\n d_star = cur_dist\n return x_star, d_star", "def find_min_path(s, t, dist):\n\n rows = len(dist) - 1\n cols = len(dist[0]) - 1\n col = cols\n row = rows\n pos_str = \"Position: (row={} col={}) -> (row={} col={})\"\n cst_str = \"Cost: {}\"\n prev_row = row\n prev_col = col\n\n # init sparse path matrix\n sparse_path = [[\" \" for x in range(cols + 1)] for x in range(rows + 1)]\n sparse_path[0][0] = \"0\"\n\n # start with operation at (rows, cols) and work backwards\n sparse_path[rows][cols] = dist[rows][cols]\n\n if verbose == 2:\n print()\n print(\"Initial Minimum Path Matrix:\")\n print_matrix(s, t, sparse_path)\n\n while True:\n\n # bail out if we are in the corner\n if row == 0 and col == 0:\n break\n\n # if we are not at a matrix boundary\n if row != 0 and col != 0: # if at left edge or top row, cannot move diagonally\n\n # diagonal\n if (dist[row - 1][col - 1] == min(dist[row - 1][col],\n dist[row][col - 1],\n dist[row - 1][col - 1])) and (dist[row - 1][col - 1] == dist[row][col] or dist[row - 1][col - 1] == dist[row][col] - 1):\n sparse_path[row - 1][col - 1] = dist[row - 1][col - 1]\n temp_cost = dist[row - 1][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n 
col -= 1\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # left\n elif dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # above\n else:\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # if at matrix edge, can only move up\n elif col == 0:\n # above\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # must be at row boundary, can only move left\n else:\n # left\n if dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # print matrix\n if verbose == 2:\n print_matrix(s, t, sparse_path)\n\n return sparse_path", "def find_neighbor_indices(atoms, probe, k):\r\n coords_all = atoms.getCoords()\r\n neighbor_indices = []\r\n atom_k = atoms[k]\r\n radius = atom_k.getRadius() + probe + probe\r\n\r\n indices = range(k)\r\n indices.extend(range(k+1, len(atoms)))\r\n\r\n for i in indices:\r\n\tdist = pos_distance(coords_all[k], coords_all[i])\r\n\t#dist = np.linalg.norm(coords_all[k] - coords_all[i])\r\n\tif dist < radius + atoms[i].getRadius():\r\n neighbor_indices.append(i)\r\n\r\n return neighbor_indices", "def find_closest_atom(coords1, coords2):\n\n coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def djikstre(connection_mat):\n n = connection_mat.shape[0]\n dist, prev = {}, {}\n Q = list(range(n))\n \n for i in Q:\n dist[i] = np.inf\n dist[n-2] = 0.0\n \n while(len(Q)>0):\n\n min_dist = min([dist[key] for key in Q])\n u = [key for key in Q if dist[key] == min_dist][0]\n Q.remove(u)\n\n for v in np.nonzero(connection_mat[:, u])[0]:\n \n alt = dist[u]+connection_mat[v, u]\n \n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n \n return dist, prev", "def _first_index_with_bigger_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] >= P[i]:\n i -= 1\n return i", "def neirest_neighbour(business, cells):\n array = cells.get_neighbours(business, num=1)\n neighbours = pd.DataFrame(array).set_index('index')\n index = neighbours['distance'].idxmin()\n return neighbours.loc[index]", "def _retrieve_neighbors(df, i_point, point, eps, column):\n neigborhood = []\n for index, row in df.iterrows():\n if index != i_point:\n a = np.array(point[column])\n b = np.array([row[column]])\n distance = np.linalg.norm(a - b)\n if distance <= eps:\n 
neigborhood.append(index)\n\n return neigborhood", "def find_nearest_neighbors(p, points, k):\n\timport numpy as np\n\tdistances = np.zeros(points.shape[0])\n\tfor i in range(len(distances)):\n\t\tdistances[i] = distance(p,points[i])\n\tind = np.argsort(distances)\n\treturn ind[0:k]", "def get_nearest_row(self):\n return (self.rect.top - (self.screen.get_height() // 12)) // self.maze.block_size", "def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass", "def _nearest_point_index(points, point):\n distance = sys.float_info.max\n index = None\n for i, p in enumerate(points):\n temp = _vec_distance(p, point)\n if temp < distance:\n distance = temp\n index = i\n return index, distance", "def nearest_neighbor(data):\n features = set([i for i, x in enumerate(data[0][1])])\n return leave_one_out_cross_validation(data, features)", "def shortestpathij(self, i, j):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for i in range(len(pathlist)):\n Temp = 0\n for j in range(len(pathlist[i]) - 1):\n Temp += self.Dismatrix[pathlist[i][j], pathlist[i][j+1]]\n distance.append(Temp)\n \n if(len(distance) == 0):\n return None\n else:\n return min(distance)", "def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def closest_node(node, nodes):\n nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum(\"ij,ij->i\", deltas, deltas)\n return np.argmin(dist_2), np.min(dist_2)", "def extract_min(H, ds):\n minDist = approxInf\n u = None # min vertex unknown\n i = 0\n for v in H:\n if ds[v] <= minDist:\n minDist = ds[v]\n u = v # note that u is unused (instead returned by pop)\n imin = i\n i += 1\n return(H.pop(imin)) # return [u, d]", "def min_distance(s1, s2):\n n = len(s1)\n m = len(s2)\n matrix = [([0]*(m+1)) for i in xrange(n+1)]\n for i in xrange(m+1):\n matrix[0][i] = i\n for i in xrange(n+1):\n matrix[i][0] = i\n for i in xrange(1,n+1):\n for j in xrange(1,m+1):\n temp = min(matrix[i-1][j]+1, matrix[i][j-1]+1)\n d = 0 if s1[i-1]==s2[j-1] else 1\n matrix[i][j] = min(temp, matrix[i-1][j-1]+d)\n return matrix[n][m]", "def getNearestLineIndex(row, tagLineNumbers):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # initialize local auxiliary variables {{{\n nearestLineNumber = -1\n nearestLineIndex = -1\n # }}}\n\n # go through all tag line numbers and find the one nearest to the specified row {{{\n for lineIndex, lineNumber in enumerate(tagLineNumbers):\n # if the current line is nearer the current cursor position, take it {{{\n if (nearestLineNumber < lineNumber <= row):\n nearestLineNumber = lineNumber\n nearestLineIndex = lineIndex\n # }}}\n\n # if we've got past the current cursor position, let's end the search {{{\n if (lineNumber >= row):\n break\n # }}}\n # }}}\n\n # return index of the line with the nearest tag\n return nearestLineIndex\n # }}}", "def _low_tri_indices(rowCount):\n for col in range(rowCount):\n for row in range(col, rowCount):\n yield (row, col)", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def 
findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]", "def min_horizontal_dist_meters(coords, targets, is_geo=False):\n xe = coords[:, 0]\n ye = coords[:, 1]\n n = len(xe)\n d = np.zeros(n)\n for j in range(n):\n d1 = dist_in_meters(targets, [xe[j], ye[j]], is_geo=is_geo)\n d[j] = d1.min()\n return d", "def findminpath(tab, gxtab, gytab, pixtab):\n\n pathdist = 2 # the number of points each points on a ray can related to on the previous ray\n pathdist_penalty = 0.3 # penalty of the difference of the pathdist\n pathpix_penalty = 2 # penalty of the difference of pixel values between the point and the previous point\n nray = tab.shape[1]\n\n #tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))\n #pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))\n #gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))\n #gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))\n\n tab = np.hstack((tab,tab,tab)) # horizontally stack the tab matrix to prepare for the filtering on the result\n pixtab = np.hstack((pixtab,pixtab,pixtab))\n gxtab = np.hstack((gxtab,gxtab,gxtab))\n gytab = np.hstack((gytab,gytab,gytab))\n\n tab = (tab - tab.min()) / (tab.max() - tab.min()) # noralize the tab matrix\n pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # for we want to find the white contour of the cell so we multipy -1 on the pixtab\n # tab = tab / np.median(tab)\n # pixtab = pixtab / np.median(pixtab)\n path = np.zeros(tab.shape)\n path[:, 0] = np.array(range(0, tab.shape[0]))\n score = np.zeros(tab.shape)\n score[:, 1] = tab[:, 1]\n\n for i in range(1, tab.shape[1]):\n for j in range(tab.shape[0]):\n mins = np.Inf # record the min value of the ray\n minat = 0\n for k in range(-pathdist, pathdist+1):\n if(0 <= (j+k) and (j+k) < tab.shape[0]):\n s = pixtab[j, i]\n pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])\n s += pixdiff * pathpix_penalty # two kinds of penalty\n s += abs(k) * pathdist_penalty\n s += score[j+k, i-1]\n\n if(s < mins):\n mins = s\n minat = j + k\n path[j, i] = minat\n score[j, i]= mins\n\n start = int(np.argmin(score[:, -1]))\n path = path.astype(np.int32)\n minpath = [start]\n for i in range(tab.shape[1]-1, 0, -1):\n minpath.append(path[minpath[-1], i])\n minpath = minpath[::-1]\n # print(len(minpath))\n minpath = savgol_filter(minpath, 15, 3) # apply a Savitzky-Golay filter to the raw minpath signal\n minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray\n return np.array(minpath).astype(np.int32)", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def _intra_cluster_distance(distances_row, labels, i):\n mask = labels == labels[i]\n mask[i] = False\n if not np.any(mask):\n # cluster of size 1\n return 0\n a = np.mean(distances_row[mask])\n return a", "def matrix_idx(n_hist, n_req, n_rows):\n\n flat_idx = []\n for i in range(n_rows):\n 
flat_idx.extend(range(i * n_req, (i + 1) * n_req + n_hist))\n # idx = np.unravel_index(flat_idx, (n_rows, n_hist + n_req))\n\n idx_matrix = np.reshape(flat_idx, (n_rows, n_hist + n_req))\n idxX = idx_matrix[:, n_req:]\n idxY = idx_matrix[:, :n_req]\n\n return idxX, idxY", "def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index", "def __find_immediate_predecessors(storm_object_table, target_row):\n\n error_checking.assert_is_integer(target_row)\n error_checking.assert_is_geq(target_row, 0)\n error_checking.assert_is_less_than(\n target_row, len(storm_object_table.index)\n )\n\n predecessor_sec_id_strings = [\n storm_object_table[c].values[target_row]\n for c in PREV_SECONDARY_ID_COLUMNS\n if storm_object_table[c].values[target_row] != ''\n ]\n\n num_predecessors = len(predecessor_sec_id_strings)\n if num_predecessors == 0:\n return numpy.array([], dtype=int)\n\n target_time_unix_sec = storm_object_table[\n tracking_utils.VALID_TIME_COLUMN].values[target_row]\n\n predecessor_rows = numpy.full(num_predecessors, -1, dtype=int)\n\n for i in range(num_predecessors):\n these_rows = numpy.where(numpy.logical_and(\n storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values ==\n predecessor_sec_id_strings[i],\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values <\n target_time_unix_sec\n ))[0]\n\n if len(these_rows) == 0:\n continue\n\n this_subrow = numpy.argmax(\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[\n these_rows]\n )\n\n predecessor_rows[i] = these_rows[this_subrow]\n\n return predecessor_rows[predecessor_rows >= 0]", "def _partition_nearest(self, medoids, dists, only_these=set()):\n if len(only_these) == 0:\n allowed_inds = self._not_ignored_inds\n else:\n allowed_inds = self._not_ignored_inds & only_these\n closest_medoid_ind = np.argmin(dists[:,medoids], 1) # If len(medoids)==3, would look like [2,1,1,0,1,2,...].\n clusts = [[] for i in medoids]\n for node_ind, med_ind in enumerate(closest_medoid_ind):\n if node_ind in allowed_inds:\n clusts[med_ind].append(node_ind)\n return clusts", "def index_condensed_matrix(n, i, j):\n if i == j:\n main_warning(\"Diagonal elements (i=j) are not stored in condensed matrices.\")\n return None\n elif i > j:\n i, j = j, i\n return int(i * (n - (i + 3) * 0.5) + j - 1)", "def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj", "def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the 
distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)", "def closest_centroid(x,centroids):\n\tK =len(centroids)\n\tN = len(x)\n\tDistance = np.zeros((N,K))\n\tfor j in range(K):\n\t\tmu = centroids[j]\n\t\tDistance[:,j] = np.linalg.norm(x-mu,axis=1)\n\tout = np.argmin(Distance,axis=1) \n\treturn out", "def medoid_ft(ftvectors,pairwise_dist_matrix):\r\n N = len(ftvectors)\r\n if N == 1 : \r\n return 0 \r\n # pairwise_dist_matrix = calculate_dist_matrix_ft(ftvectors) \r\n sum_dist = np.sum(pairwise_dist_matrix, axis = 0)\r\n min_idx = np.argmin(sum_dist)\r\n return min_idx", "def nn_dists(coords, spherical=True):\n\n full_dist_matrix = all_dists(coords, spherical)\n\n # find all minimum distances\n # apply min over ranges of the dist array\n min_dists = np.min(full_dist_matrix, axis=1)\n min_ix = np.argmin(full_dist_matrix, axis=1)\n\n return min_dists, min_ix", "def find_indices(colorhs, centres):\n\n indices = np.zeros(colorhs.shape[0], dtype=np.uint8)\n i = 0\n\n for hs in colorhs:\n # Past Euclidian distance\n past_ed = float(\"inf\")\n for cluster in range(centres.shape[0]):\n # Current Euclidian distance\n curr_ed = (sum((hs - centres[cluster, :]) ** 2)) ** 1/2\n # A frame belongs to the cluster with the minimum ed value.\n if curr_ed <= past_ed:\n past_ed = curr_ed\n indices[i] = cluster\n i += 1\n return indices", "def minesweeper(matrix):\n \n num_rows = len(matrix)\n num_cols = len(matrix[0])\n \n adj_mines = []\n \n adj_row = [0]*num_cols\n \n for i in range(num_rows):\n adj_mines.append(adj_row[:])\n \n for r in range(num_rows):\n for c in range(num_cols):\n if matrix[r][c] == True:\n if (r-1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r-1][c-1] += 1\n if (r-1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r-1][c] += 1\n if (r-1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r-1][c+1] += 1\n if (r) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r][c-1] += 1\n if (r) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r][c+1] += 1\n if (r+1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r+1][c-1] += 1\n if (r+1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r+1][c] += 1\n if (r+1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r+1][c+1] += 1\n\n \n return adj_mines", "def findClosetCentroids(X, centroids):\n\tm, n = X.shape\n\tK = centroids.shape[0]\n\tidx = np.zeros(m) # m\n\n\tfor i in range(m):\n\t\ttemp = np.tile(X[i, :], K).reshape(centroids.shape)\n\t\tidx[i] = np.argmin(np.sum((centroids - temp) ** 2, axis=1))\n\treturn idx", "def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices" ]
[ "0.76758415", "0.73902124", "0.64999825", "0.6483356", "0.6479489", "0.64227754", "0.6417629", "0.6413701", "0.6329621", "0.63124496", "0.6312059", "0.6303333", "0.6287571", "0.62725973", "0.6206513", "0.6193971", "0.61772275", "0.61472964", "0.6100118", "0.60979205", "0.6076559", "0.6044639", "0.60424477", "0.60383624", "0.5994276", "0.5977324", "0.59745604", "0.59316355", "0.5921004", "0.59207225", "0.59187675", "0.5914445", "0.59131867", "0.5908921", "0.5902639", "0.5874711", "0.5851722", "0.58390146", "0.5798481", "0.5793911", "0.5791052", "0.5787699", "0.5767839", "0.5752606", "0.5741009", "0.5733529", "0.5732233", "0.57233125", "0.56989825", "0.5698967", "0.56981534", "0.56805634", "0.5673394", "0.56583023", "0.5656402", "0.5655434", "0.565437", "0.56487834", "0.5646905", "0.56460285", "0.5636113", "0.56331956", "0.563113", "0.5623121", "0.56203", "0.561111", "0.5604995", "0.55974776", "0.55969167", "0.5584674", "0.5579417", "0.55760306", "0.5572063", "0.55564016", "0.55543065", "0.5553647", "0.55494267", "0.5549265", "0.5548724", "0.5543832", "0.55302244", "0.551905", "0.5507722", "0.55050474", "0.5502348", "0.5491812", "0.54882824", "0.5487834", "0.54852426", "0.5484282", "0.5482726", "0.54819846", "0.5475506", "0.54594016", "0.5456662", "0.5451533", "0.5442221", "0.543894", "0.5433832", "0.5427434" ]
0.72165185
2
Each row of the matrix, say the jth row, represents the distances between the other data points and the jth point. This function returns the indexes of the points with the smallest distances with respect to each point represented by that row. By row, I mean the 0th dimension. Also notice that this function includes the target particle itself, i.e. the diagonal element of the matrix is set to 1.
def nearest_points_indexes_with_self(matrix, num_to_keep):

    # Set the diagonal to 1
    np.fill_diagonal(matrix, 1)
    # Get the position for the resulted values
    sort_arg = np.argsort(matrix, axis=1)

    return sort_arg[:, : num_to_keep]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j", "def nearest_points_indexes_without_self(matrix, num_to_keep):\n\n # Set the diagonal to 0\n np.fill_diagonal(matrix, 0)\n # Get the position for the resulted values\n sort_arg = np.argsort(matrix, axis=1)\n\n return sort_arg[:, : num_to_keep]", "def find_min_distance():\n return np.argmin(d)", "def nearest_neighbour(matrix, start=0):\n path = [start]\n while len(matrix) != len(path):\n matrix[:, start] = numpy.inf\n start = numpy.argmin(matrix[start])\n path.append(start)\n return path", "def nearest_points_values_with_self(matrix, num_to_keep):\n\n # Set the diagonal to 1\n np.fill_diagonal(matrix, 1)\n # Get the position for the resulted values\n sort = np.sort(matrix, axis=1)\n\n return sort[:, : num_to_keep]", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def smallest_distance(self, clusters):\n i, j = numpy.unravel_index(numpy.argmin(clusters), clusters.shape)\n return clusters[i, j], i, j", "def nearest_cluster(X,c):\n K = np.size(c,0)\n idx = np.zeros((np.size(X,0),1))\n arr = np.empty((np.size(X,0),1))\n for i in range(0,K):\n y = c[i]\n temp = np.ones((np.size(X,0),1))*y\n b = np.power(np.subtract(X,temp),2)\n a = np.sum(b,axis = 1)\n a.resize((np.size(X,0),1))\n arr = np.append(arr, a, axis=1)\n arr = np.delete(arr,0,axis=1)\n idx = np.argmin(arr, axis=1)\n return idx", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def find_closest_index(traj, point):\n\n\t#TODO: vectorise function to receive any length of points.\n\n\tdistances = np.subtract(np.array(point),traj) \n\tdistances = distances.reshape(-1,2)\n\t#distances = distances[~np.isnan(distances)].reshape(-1,2)\n\n\t#print(\"distances\")\n\t#pprint(distances)\n\tdist_array = np.linalg.norm(distances, axis = 1)\n\t#pprint(dist_array)\n\t#dist_array = np.sqrt((distances[:,0]**2)+(distances[:,1]**2)) #array of distances from trajectory to gaze landing point in world. \n\tidx = np.nanargmin(abs(dist_array)) #find smallest difference in pythag distance from 0,0 to get closest point. 
\n\tdists = distances[idx, :]\n\tdist = dist_array[idx]\n\n\treturn idx#, dists, dist\n\t#return idx", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def _find_min_pair(pandas_matrix):\n numpy_matrix = pandas_matrix.values\n mins = np.where(numpy_matrix == np.nanmin(numpy_matrix))\n min_col_idx = mins[0][0]\n min_row_idx = mins[1][0]\n (min_col, min_row) = (pandas_matrix.index[min_col_idx], \n pandas_matrix.columns[min_row_idx])\n\n return (min_col, min_row)", "def find_closest(distances, threshold):\n n = len(distances)\n person_1 = []\n person_2 = []\n d = []\n\n for i in range(n):\n for j in range(i+1, n):\n if distances[i][j] <= threshold:\n person_1.append(i)\n person_2.append(j)\n d.append(distances[i][j])\n\n return person_1, person_2, d", "def nearest_points_values_without_self(matrix, num_to_keep):\n\n # Set the diagonal to 0\n np.fill_diagonal(matrix, 0)\n # Get the position for the resulted values\n sort = np.sort(matrix, axis=1)\n\n return sort[:, : num_to_keep]", "def getRowHeuristics(matrix):\n row, col = matrix.shape\n rHeuristic = np.zeros((row,2)) # Dos columnas. La primera para indicar la columna la segunda para la Heuristica\n for i in range(0,row):\n rHeuristic[i,0] = int(i)\n #print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))\n rHeuristic[i,1] = 1/sum(matrix[i,:])\n return rHeuristic[rHeuristic[:,1].argsort()]", "def nearestNeighbor(self, coords, my_index, blacklist):\n min_index, min_distance = 0, 999999999\n my_col, my_row = coords[my_index]\n for i, (col, row) in enumerate(coords):\n if i in blacklist:\n continue\n distance = math.sqrt((my_col-col)**2 + (my_row-row)**2)\n if distance < min_distance:\n min_index, min_distance = i, distance\n return min_index", "def _nearest_cluster_distance(distances_row, labels, i):\n label = labels[i]\n b = np.min([np.mean(distances_row[labels == cur_label])\n for cur_label in set(labels) if not cur_label == label])\n return b", "def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()", "def find_closest_trajectory_pose(self):\n np_state = numpy.array([[self.x], [self.y]])\n temp_distance = numpy.sum(\n (self.np_trajectory[0:2, :] - np_state) ** 2,\n axis=0)\n best_idx = numpy.argmin(temp_distance)\n return best_idx", "def _compute_euclidean_neigh_matrix(src, d_matrix, radius):\n\n n_max = 100\n n_min = 3\n reached_points = np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours 
aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")", "def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists", "def saddle_points(matrix):\n if not all(len(row) == len(matrix[0]) for row in matrix[1:]):\n raise ValueError('Provided matrix is irregular.')\n columns = [col for col in zip(*matrix)]\n points = set()\n for ridx, row in enumerate(matrix):\n for cidx, element in enumerate(row):\n if element == max(row) and element == min(columns[cidx]):\n points.add((ridx, cidx))\n return points", "def _first_index_with_smaller_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] <= P[i]:\n i -= 1\n return i", "def min_distance_vertex(distance, visited):\n vertices = len(visited)\n min_distance = INF\n min_index = None\n for v in range(vertices):\n if not visited[v] and distance[v] <= min_distance:\n min_distance = distance[v]\n min_index = v\n return min_index", "def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def shortestDistance(self, grid):\n # return self.house_oriented_TLE(grid)\n # One axis\n row_count = [sum(row) for row in grid]\n col_count = [0]* len(grid[0])\n row_dist = [0]* len(grid)\n col_dist = [0]* len(grid[0])\n output = sys.maxsize\n for i in range(len(grid)): \n for j in range(len(grid[0])):\n col_count[j] += grid[i][j]\n \n for index_p in range(len(row_count)):\n for index_h in range(len(row_count)):\n row_dist[index_p] += abs(index_h - index_p) * row_count[index_h]\n \n for 
index_p in range(len(col_count)):\n for index_h in range(len(col_count)):\n col_dist[index_p] += abs(index_h - index_p) * col_count[index_h]\n \n # print(row_count)\n # print(col_count)\n # print(row_dist)\n # print(col_dist)\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n continue\n output = min(output, row_dist[i] + col_dist[j])\n return output", "def find_min_point(points):\r\n smallest_point_index = 0\r\n for i in range(1, len(points)):\r\n if points[i][1] < points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n elif points[i][0] > points[smallest_point_index][0] and points[i][1] == points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n return smallest_point_index", "def dist(dm, sm, neighbors):\n\n # Initialize list of possible distances\n distances = []\n\n # loop over all neighbors of the cell\n for neighbor in neighbors:\n # If the neighbor is valid\n if dm[neighbor[0], neighbor[1]] != -1:\n # add neighbor distance + 1 to possible distances\n distances.append(dm[neighbor[0], neighbor[1]] + 1)\n\n # return minimal distance\n return np.min(distances)", "def get_min_dist(x0, y0, arr):\n dist = np.hypot(arr.T[0] - x0, arr.T[1] - y0)\n min_dist = np.min(dist)\n val = np.argmin(dist)\n return min_dist, arr[val]", "def nodes_min_energy_index(self, node):\n idx = -1\n curr_energy = np.inf\n for i in range(self.cost_matrix.shape[1]):\n new_energy = self.cost_matrix[node][i]\n if new_energy < curr_energy:\n curr_energy = new_energy\n idx = i\n return idx", "def get_maxmin_index_from_row(\n distance_matrix: np.ndarray,\n row: int,\n previous_indexes: List,\n type: str,\n )-> int:\n distance_matrix = distance_matrix.copy()\n arr = distance_matrix[row].astype(float)\n \n aux_list = range(arr.shape[0])\n aux_list_2 = []\n for i in aux_list:\n if i in previous_indexes:\n aux_list_2.append(True)\n else:\n aux_list_2.append(False)\n previous_indexes_bool = aux_list_2\n \n if type == 'max':\n arr[previous_indexes_bool] = -1\n target_index = np.argmax(arr)\n if type == 'min':\n arr[previous_indexes_bool] = np.Inf\n target_index = np.argmin(arr)\n \n return target_index", "def get_idx(lons, lats, lon, lat):\n dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5\n return np.unravel_index(dist.argmin(), dist.shape)", "def pair_idx(rows, comm=None):\n raise Exception(\"Not implemented\")\n \n if comm == None:\n comm = MPI.COMM_WORLD\n \n total = comb(rows,2,exact=True)\n size = comm.Get_size()\n \n size = 1000\n \n print(total / size)\n \n target = total / size\n \n current_row = 0\n calc_list = []\n row_list = [[] for x in range(size)]\n for rank in range(size):\n row_list[rank].append(current_row)\n \n current_calcs = 0\n \n for value in range(current_row, rows):\n current_calcs += value\n if current_calcs > target:\n if rank == size-1:\n pass\n else:\n break\n \n calc_list.append(current_calcs)\n row_list[rank].append(value)\n current_row = value\n \n return row_list,calc_list", "def _findMin(p, A):\n\n m=(-1, (0,0))\n for p0 in A:\n dist = np.linalg.norm(p0-np.array(p))\n if m[0]==-1 or m[0]>dist:\n m = (dist, p0)\n \n return tuple(m[1])", "def distance_matrix(n_row, n_col):\n\n n_pop = int(n_row * n_col)\n center = int(n_row/2*(n_col+1))\n\n pop_idx = np.arange(n_pop)\n pop_idx_col = np.remainder(pop_idx, n_col)\n pop_idx_row = pop_idx // n_row\n\n pos = np.vstack((pop_idx_col,pop_idx_row)).T\n distance = spa.distance.cdist([pos[center]], pos)[0]\n\n return distance", "def min(weightData , dataSetVector ):\r\n # weightData: pass the whole 
weightData array.\r\n # dataSetVector: pass the a data vector to compare with weightdata array, to find its closest match\r\n winnerIndex = 0 #flag for initalizing the winner index\r\n minValue = EcuDist(dataSetVector,weightData[0]) # initalize the minValue\r\n # iterate through all weighdata rows to find the closest match, depending on ecu. distance,\r\n #and then return the index of the closest match(winner)\r\n for i in range(weightData.shape[0]):\r\n if(EcuDist(dataSetVector,weightData[i]) < minValue):\r\n minValue = EcuDist(dataSetVector,weightData[i])\r\n winnerIndex = i\r\n return winnerIndex", "def find_min(self, A, w):\n import numpy as np\n\n vcost = self.INFINITY\n vto = vfrom = -1\n for v in w:\n # Get array offset of minimum of this vertex\n i = np.argmin(A[v,:])\n if A[v,i] < vcost:\n vcost = A[v,i]\n vto = i\n vfrom = v\n return (vfrom, vto, vcost)", "def get_nearest_atom_inds_per_mol(self):\n self.closest_at_per_mol = np.zeros((self.nmol,\n self.at_per_mol,\n self.at_per_mol-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.at_per_mol)\n for imol in range(self.nmol):\n for iat in range(self.at_per_mol):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist_per_mol[imol, iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_at_per_mol[imol, iat] = at_inds", "def find_nearest_neighbours_SURF(averageDistance,inst,distanceArray,maxInst): \r\n NN=[]\r\n min_indices=[] \r\n\r\n for j in range(maxInst):\r\n if inst != j:\r\n locator = [inst,j]\r\n locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy)\r\n d = distanceArray[locator[0]][locator[1]]\r\n if d<averageDistance:\r\n min_indices.append(j)\r\n \r\n for j in range(len(min_indices)):\r\n NN.append(min_indices[j])\r\n \r\n return NN", "def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]", "def update_distmatrix(min_idx, dist_matrix):\n i, j = min_idx\n new_cluster_dist = np.minimum(dist_matrix[i, :], dist_matrix[j, :])\n new_cluster_dist[i] = np.inf\n \n dist_matrix[i, :] = new_cluster_dist\n dist_matrix[:, i] = new_cluster_dist\n \n dist_matrix[j, :] = np.inf\n dist_matrix[:, j] = np.inf\n \n return dist_matrix", "def get_nearest_node_index(node_list, random_node):\n\n dist_list = [\n (node.x - random_node.x) ** 2 + (node.y - random_node.y) ** 2\n for node in node_list\n ]\n minind = dist_list.index(min(dist_list))\n\n return minind", "def getNeighbors(training_data, test_row, k):\n\n distances = list()\n for training_row in training_data:\n dist = euclidianDistance(training_row, test_row)\n distances.append([training_row, dist])\n \n #Sort on the basis of dist\n distances.sort(key=lambda row:row[1])\n\n neighbors = list()\n\n for i in range(int(k)):\n neighbors.append(distances[i][0])\n\n return neighbors", "def eeg_findnearest(x,X):\t\n\t#x array or vector and X a scalar\n\tabsdif = np.abs(x-X)\n\tval = np.min(absdif)\n\tidx = absdif.argmin()\n\treturn val,idx", "def get_nearest_atom_inds(self):\n # Create empty data structure\n self.closest_ats = np.zeros((self.natom, self.natom-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.natom)\n for iat in range(self.natom):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist[iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_ats[iat] = at_inds", "def 
closestCluster(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n\treturn bestIndex", "def get_min_shannon_entropy(grid):\r\n curr_min = math.inf\r\n curr_best = []\r\n for i in range(len(grid[0])):\r\n for j in range(len(grid)):\r\n if not grid[j][i].collapsed:\r\n w = grid[j][i].block_weights\r\n shannon_entropy = sum([-math.log(el) for el in w] )\r\n if shannon_entropy < curr_min:\r\n curr_min = shannon_entropy\r\n curr_best = [(i,j)]\r\n elif shannon_entropy == curr_min:\r\n curr_best.append((i,j))\r\n idx = np.random.choice(range(len(curr_best))) #choose randomly if theres a tie\r\n return curr_best[idx] #x,y\r", "def findSmallest(distancesWithNames):\n smallest = distancesWithNames[0][2]\n smallestIndex = -1\n for i in range(len(distancesWithNames)):\n if smallest >= distancesWithNames[i][2]:\n smallest = distancesWithNames[i][2]\n smallestIndex = i\n return smallestIndex", "def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx", "def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass", "def find_min_path(s, t, dist):\n\n rows = len(dist) - 1\n cols = len(dist[0]) - 1\n col = cols\n row = rows\n pos_str = \"Position: (row={} col={}) -> (row={} col={})\"\n cst_str = \"Cost: {}\"\n prev_row = row\n prev_col = col\n\n # init sparse path matrix\n sparse_path = [[\" \" for x in range(cols + 1)] for x in range(rows + 1)]\n sparse_path[0][0] = \"0\"\n\n # start with operation at (rows, cols) and work backwards\n sparse_path[rows][cols] = dist[rows][cols]\n\n if verbose == 2:\n print()\n print(\"Initial Minimum Path Matrix:\")\n print_matrix(s, t, sparse_path)\n\n while True:\n\n # bail out if we are in the corner\n if row == 0 and col == 0:\n break\n\n # if we are not at a matrix boundary\n if row != 0 and col != 0: # if at left edge or top row, cannot move diagonally\n\n # diagonal\n if (dist[row - 1][col - 1] == min(dist[row - 1][col],\n dist[row][col - 1],\n dist[row - 1][col - 1])) and (dist[row - 1][col - 1] == dist[row][col] or dist[row - 1][col - 1] == dist[row][col] - 1):\n sparse_path[row - 1][col - 1] = dist[row - 1][col - 1]\n temp_cost = dist[row - 1][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # left\n elif dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # above\n else:\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # if at matrix edge, 
can only move up\n elif col == 0:\n # above\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # must be at row boundary, can only move left\n else:\n # left\n if dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # print matrix\n if verbose == 2:\n print_matrix(s, t, sparse_path)\n\n return sparse_path", "def closestClusterAndDistance(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n return (bestIndex, closest)", "def get_nearest_indices(vector, matrix, num=5):\n if len(matrix) < num:\n num = len(matrix)\n\n m, n = matrix.shape\n diff_matrix = np.tile(vector, (m, 1)) - matrix\n diff_matrix = abs(diff_matrix)\n\n distance = diff_matrix.sum(axis=1)\n sortIndices = np.argsort(distance)\n sortIndices = sortIndices[0:num]\n\n return sortIndices[random.randint(0, num - 1)]", "def detec_mfo_dist(betas):\n k = len(betas)\n min_dist = np.inf\n for i in range(k-1):\n for j in range(i+1,k):\n distance = np.sum((betas[i,:] - betas[j,:])**2)\n if distance < min_dist:\n MFO_index = [i,j]\n min_dist = distance\n return MFO_index", "def get_nearest_row(self):\n return (self.rect.top - (self.screen.get_height() // 12)) // self.maze.block_size", "def smallest_cert(pts):\n # 1. project the points\n def projected_flattened_pt(pt):\n mag = distance(pt)\n distance_wanted = rect_dist\n return distance_wanted/mag*pt[1], \\\n distance_wanted/mag*pt[2]\n\n flat_pts = np.array([[projected_flattened_pt(pt), pt] for pt in pts])\n\n # 2. identify rows of data\n data = {} \n # maps height (from ground) to the \"row\" (scan) list of\n # horizontal-only (1D) data\n for pt, origpt in flat_pts:\n for row_height in data:\n if abs(pt[1]-row_height) < dist_bt_rows/2:\n data[row_height].append([pt[0], origpt])\n break\n else:\n data[pt[1]] = [[pt[0], origpt]]\n\n #3. 
build up final_data so each row has enough point x-density\n def get_left_i(pts, left_edge):\n # get last index <= left_edge\n for i, pt in enumerate(pts):\n if pt[0] > left_edge:\n return i - 1\n\n def get_right_i(pts, right_edge):\n # get first index >= right_edge\n i = len(pts)-1\n while True:\n pt = pts[i]\n if pt[0] < right_edge:\n return i + 1\n i -= 1\n\n final_data = []\n for _, subdata in data.items():\n subdata = sorted(subdata, key=lambda t: t[0])\n i = get_left_i(subdata, lane_left)\n end = get_right_i(subdata, lane_right)\n\n final_data = [subdata[i][1]]\n while i < end:\n next_i = get_left_i(subdata[i:], \n subdata[i][0]+max_xpt_separation)\n if not next_i:\n return False\n i += next_i\n final_data.append(subdata[i][1])\n\n return final_data", "def _lowestDistanceToCluster(self, clusters: ndarray, sequenceIdx: int) -> Tuple[int, float]:\n lowestClusterIdx = -1\n lowestDistance = np.inf\n for cIdx in range(self.numClusters):\n distance = self._distanceToCluster(clusters[cIdx], sequenceIdx)\n if distance < lowestDistance:\n lowestClusterIdx = cIdx\n lowestDistance = distance\n return lowestClusterIdx, lowestDistance", "def find_closest_atom(coords1, coords2):\n\n coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def closestCentroids(self, points , centroids ):\n dists = scipy.spatial.distance.cdist(points,centroids)\n # 1 is dimension\n minIds = numpy.argmin(dists, 1)\n return minIds", "def find_neighbor_indices(atoms, probe, k):\r\n coords_all = atoms.getCoords()\r\n neighbor_indices = []\r\n atom_k = atoms[k]\r\n radius = atom_k.getRadius() + probe + probe\r\n\r\n indices = range(k)\r\n indices.extend(range(k+1, len(atoms)))\r\n\r\n for i in indices:\r\n\tdist = pos_distance(coords_all[k], coords_all[i])\r\n\t#dist = np.linalg.norm(coords_all[k] - coords_all[i])\r\n\tif dist < radius + atoms[i].getRadius():\r\n neighbor_indices.append(i)\r\n\r\n return neighbor_indices", "def exhaustive_search(X, z):\n #initialize shortest path and distance to the first row\n d_star = la.norm(z - X[0])\n x_star = X[0]\n #check the distance from each row of X and if shortest thus far, save it\n for i in range(X.shape[0]):\n #calculate distance from current row to target z\n x = X[i,:]\n cur_dist = la.norm(x - z)\n #if the distance is smallest thus far, save the row and distance\n if cur_dist < d_star:\n x_star = x\n d_star = cur_dist\n return x_star, d_star", "def _retrieve_neighbors(df, i_point, point, eps, column):\n neigborhood = []\n for index, row in df.iterrows():\n if index != i_point:\n a = np.array(point[column])\n b = np.array([row[column]])\n distance = np.linalg.norm(a - b)\n if distance <= eps:\n neigborhood.append(index)\n\n return neigborhood", "def closest_node(node, nodes):\n nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum(\"ij,ij->i\", deltas, deltas)\n return np.argmin(dist_2), np.min(dist_2)", "def find_nearest_neighbors(p, points, k):\n\timport numpy as np\n\tdistances = np.zeros(points.shape[0])\n\tfor i in range(len(distances)):\n\t\tdistances[i] = distance(p,points[i])\n\tind = np.argsort(distances)\n\treturn ind[0:k]", "def nearest_neighbor(data):\n features = set([i for i, x in enumerate(data[0][1])])\n return leave_one_out_cross_validation(data, features)", "def djikstre(connection_mat):\n n = connection_mat.shape[0]\n dist, prev = {}, {}\n Q = list(range(n))\n \n for i in Q:\n dist[i] = 
np.inf\n dist[n-2] = 0.0\n \n while(len(Q)>0):\n\n min_dist = min([dist[key] for key in Q])\n u = [key for key in Q if dist[key] == min_dist][0]\n Q.remove(u)\n\n for v in np.nonzero(connection_mat[:, u])[0]:\n \n alt = dist[u]+connection_mat[v, u]\n \n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n \n return dist, prev", "def neirest_neighbour(business, cells):\n array = cells.get_neighbours(business, num=1)\n neighbours = pd.DataFrame(array).set_index('index')\n index = neighbours['distance'].idxmin()\n return neighbours.loc[index]", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def _nearest_point_index(points, point):\n distance = sys.float_info.max\n index = None\n for i, p in enumerate(points):\n temp = _vec_distance(p, point)\n if temp < distance:\n distance = temp\n index = i\n return index, distance", "def _kth_nearest_neighbor_dist(\n distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix], k\n) -> np.ndarray:\n\n if not is_integer(k):\n raise ValueError(f\"parameter 'k={k}' must be a positive integer\")\n else:\n # make sure we deal with Python built-in\n k = int(k)\n\n if not (0 <= k <= distance_matrix.shape[1]):\n raise ValueError(\n \"'k' must be an integer between 1 and \"\n f\"distance_matrix.shape[1]={distance_matrix.shape[1]}\"\n )\n\n if isinstance(distance_matrix, np.ndarray):\n dist_knn = np.partition(distance_matrix, k - 1, axis=1)[:, k - 1]\n elif isinstance(distance_matrix, scipy.sparse.csr_matrix):\n # see mircobenchmark_kth_nn.py for a comparison of implementations for the\n # sparse case\n\n def _get_kth_largest_elements_sparse(\n data: np.ndarray,\n indptr: np.ndarray,\n row_nnz,\n k_neighbor: int,\n ):\n dist_knn = np.zeros(len(row_nnz))\n for i in range(len(row_nnz)):\n start_row = indptr[i]\n dist_knn[i] = np.partition(\n data[start_row : start_row + row_nnz[i]], k_neighbor - 1\n )[k_neighbor - 1]\n\n return dist_knn\n\n row_nnz = distance_matrix.getnnz(axis=1)\n\n if (row_nnz < k).any():\n raise ValueError(\n f\"There are {(row_nnz < k).sum()} points that \"\n f\"do not have at least k_neighbor={k}.\"\n )\n\n dist_knn = _get_kth_largest_elements_sparse(\n distance_matrix.data,\n distance_matrix.indptr,\n row_nnz,\n k,\n )\n else:\n raise TypeError(f\"type {type(distance_matrix)} not supported\")\n\n return dist_knn", "def _first_index_with_bigger_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] >= P[i]:\n i -= 1\n return i", "def getNearestLineIndex(row, tagLineNumbers):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # initialize local auxiliary variables {{{\n nearestLineNumber = -1\n nearestLineIndex = -1\n # }}}\n\n # go through all tag line numbers and find the one nearest to the specified row {{{\n for lineIndex, lineNumber in enumerate(tagLineNumbers):\n # if the current line is nearer the current cursor position, take it {{{\n if (nearestLineNumber < lineNumber <= row):\n nearestLineNumber = lineNumber\n nearestLineIndex = lineIndex\n # }}}\n\n # if we've got past the current cursor position, let's end the search {{{\n if (lineNumber >= row):\n break\n # }}}\n # }}}\n\n # return index of the line with the nearest tag\n return nearestLineIndex\n # }}}", "def shortestpathij(self, i, j):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = 
[]\n \n for i in range(len(pathlist)):\n Temp = 0\n for j in range(len(pathlist[i]) - 1):\n Temp += self.Dismatrix[pathlist[i][j], pathlist[i][j+1]]\n distance.append(Temp)\n \n if(len(distance) == 0):\n return None\n else:\n return min(distance)", "def min_horizontal_dist_meters(coords, targets, is_geo=False):\n xe = coords[:, 0]\n ye = coords[:, 1]\n n = len(xe)\n d = np.zeros(n)\n for j in range(n):\n d1 = dist_in_meters(targets, [xe[j], ye[j]], is_geo=is_geo)\n d[j] = d1.min()\n return d", "def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]", "def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def _low_tri_indices(rowCount):\n for col in range(rowCount):\n for row in range(col, rowCount):\n yield (row, col)", "def min_distance(s1, s2):\n n = len(s1)\n m = len(s2)\n matrix = [([0]*(m+1)) for i in xrange(n+1)]\n for i in xrange(m+1):\n matrix[0][i] = i\n for i in xrange(n+1):\n matrix[i][0] = i\n for i in xrange(1,n+1):\n for j in xrange(1,m+1):\n temp = min(matrix[i-1][j]+1, matrix[i][j-1]+1)\n d = 0 if s1[i-1]==s2[j-1] else 1\n matrix[i][j] = min(temp, matrix[i-1][j-1]+d)\n return matrix[n][m]", "def extract_min(H, ds):\n minDist = approxInf\n u = None # min vertex unknown\n i = 0\n for v in H:\n if ds[v] <= minDist:\n minDist = ds[v]\n u = v # note that u is unused (instead returned by pop)\n imin = i\n i += 1\n return(H.pop(imin)) # return [u, d]", "def findminpath(tab, gxtab, gytab, pixtab):\n\n pathdist = 2 # the number of points each points on a ray can related to on the previous ray\n pathdist_penalty = 0.3 # penalty of the difference of the pathdist\n pathpix_penalty = 2 # penalty of the difference of pixel values between the point and the previous point\n nray = tab.shape[1]\n\n #tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))\n #pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))\n #gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))\n #gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))\n\n tab = np.hstack((tab,tab,tab)) # horizontally stack the tab matrix to prepare for the filtering on the result\n pixtab = np.hstack((pixtab,pixtab,pixtab))\n gxtab = np.hstack((gxtab,gxtab,gxtab))\n gytab = np.hstack((gytab,gytab,gytab))\n\n tab = (tab - tab.min()) / (tab.max() - tab.min()) # noralize the tab matrix\n pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # for we want to find the white contour of the cell so we multipy -1 on the pixtab\n # tab = tab / np.median(tab)\n # pixtab = pixtab / np.median(pixtab)\n path = np.zeros(tab.shape)\n path[:, 0] = np.array(range(0, tab.shape[0]))\n score = np.zeros(tab.shape)\n score[:, 1] = tab[:, 1]\n\n for i in range(1, tab.shape[1]):\n for j in range(tab.shape[0]):\n mins = np.Inf # record the min value of the ray\n minat = 0\n for k in range(-pathdist, pathdist+1):\n if(0 <= (j+k) and (j+k) < tab.shape[0]):\n s = pixtab[j, i]\n pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])\n s += pixdiff * pathpix_penalty # two kinds of penalty\n s += abs(k) * pathdist_penalty\n s += score[j+k, i-1]\n\n if(s < mins):\n mins = s\n minat = j + k\n path[j, i] = minat\n score[j, i]= mins\n\n start = int(np.argmin(score[:, -1]))\n path = path.astype(np.int32)\n minpath = 
[start]\n for i in range(tab.shape[1]-1, 0, -1):\n minpath.append(path[minpath[-1], i])\n minpath = minpath[::-1]\n # print(len(minpath))\n minpath = savgol_filter(minpath, 15, 3) # apply a Savitzky-Golay filter to the raw minpath signal\n minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray\n return np.array(minpath).astype(np.int32)", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def index_condensed_matrix(n, i, j):\n if i == j:\n main_warning(\"Diagonal elements (i=j) are not stored in condensed matrices.\")\n return None\n elif i > j:\n i, j = j, i\n return int(i * (n - (i + 3) * 0.5) + j - 1)", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj", "def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices", "def locate_source(p,d):\n # M = sensors, n = dimensions\n M, n = p.shape\n p = np.matrix( p ).T\n\n # pick closest receiver\n c = np.argmin(d)\n #sensors delta time relative to sensor c\n d = d - min(d)\n\n indices = list(range(M))\n del indices[c]\n\n A = np.zeros([M-2,n])\n b = np.zeros([M-2,1])\n\n i = indices[0]\n for row,j in enumerate(indices[1:]):\n A[row,:] = 2*( (d[j])*(p[:,i]-p[:,c]).T - \\\n (d[i])*(p[:,j]-p[:,c]).T )\n b[row,0] = (d[i])*((d[j])**2-p[:,j].T*p[:,j]) + \\\n ((d[i])-(d[j]))*p[:,c].T*p[:,c] + \\\n (d[j])*(p[:,i].T*p[:,i]-(d[i])**2)\n\n\n x = np.asarray( np.linalg.lstsq(A,b)[0] )[:,0]\n return x", "def _intra_cluster_distance(distances_row, labels, i):\n mask = labels == labels[i]\n mask[i] = False\n if not np.any(mask):\n # cluster of size 1\n return 0\n a = np.mean(distances_row[mask])\n return a", "def matrix_idx(n_hist, n_req, n_rows):\n\n flat_idx = []\n for i in range(n_rows):\n flat_idx.extend(range(i * n_req, (i + 1) * n_req + n_hist))\n # idx = np.unravel_index(flat_idx, (n_rows, n_hist + n_req))\n\n idx_matrix = np.reshape(flat_idx, (n_rows, n_hist + n_req))\n idxX = idx_matrix[:, n_req:]\n idxY = idx_matrix[:, :n_req]\n\n return idxX, idxY", "def minesweeper(matrix):\n \n num_rows = len(matrix)\n num_cols = len(matrix[0])\n \n adj_mines = []\n \n adj_row = [0]*num_cols\n \n for i in range(num_rows):\n adj_mines.append(adj_row[:])\n \n for r in range(num_rows):\n for c in range(num_cols):\n if matrix[r][c] == True:\n if (r-1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r-1][c-1] += 1\n if (r-1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r-1][c] += 1\n if (r-1) in range(num_rows) and (c+1) in range(num_cols): \n 
adj_mines[r-1][c+1] += 1\n if (r) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r][c-1] += 1\n if (r) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r][c+1] += 1\n if (r+1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r+1][c-1] += 1\n if (r+1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r+1][c] += 1\n if (r+1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r+1][c+1] += 1\n\n \n return adj_mines", "def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)", "def __find_immediate_predecessors(storm_object_table, target_row):\n\n error_checking.assert_is_integer(target_row)\n error_checking.assert_is_geq(target_row, 0)\n error_checking.assert_is_less_than(\n target_row, len(storm_object_table.index)\n )\n\n predecessor_sec_id_strings = [\n storm_object_table[c].values[target_row]\n for c in PREV_SECONDARY_ID_COLUMNS\n if storm_object_table[c].values[target_row] != ''\n ]\n\n num_predecessors = len(predecessor_sec_id_strings)\n if num_predecessors == 0:\n return numpy.array([], dtype=int)\n\n target_time_unix_sec = storm_object_table[\n tracking_utils.VALID_TIME_COLUMN].values[target_row]\n\n predecessor_rows = numpy.full(num_predecessors, -1, dtype=int)\n\n for i in range(num_predecessors):\n these_rows = numpy.where(numpy.logical_and(\n storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values ==\n predecessor_sec_id_strings[i],\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values <\n target_time_unix_sec\n ))[0]\n\n if len(these_rows) == 0:\n continue\n\n this_subrow = numpy.argmax(\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[\n these_rows]\n )\n\n predecessor_rows[i] = these_rows[this_subrow]\n\n return predecessor_rows[predecessor_rows >= 0]", "def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index", "def medoid_ft(ftvectors,pairwise_dist_matrix):\r\n N = len(ftvectors)\r\n if N == 1 : \r\n return 0 \r\n # pairwise_dist_matrix = calculate_dist_matrix_ft(ftvectors) \r\n sum_dist = np.sum(pairwise_dist_matrix, axis = 0)\r\n min_idx = np.argmin(sum_dist)\r\n return min_idx", "def _partition_nearest(self, medoids, dists, only_these=set()):\n if len(only_these) == 0:\n allowed_inds = self._not_ignored_inds\n else:\n allowed_inds = self._not_ignored_inds & only_these\n closest_medoid_ind = np.argmin(dists[:,medoids], 1) # If len(medoids)==3, would look like 
[2,1,1,0,1,2,...].\n clusts = [[] for i in medoids]\n for node_ind, med_ind in enumerate(closest_medoid_ind):\n if node_ind in allowed_inds:\n clusts[med_ind].append(node_ind)\n return clusts", "def closest(self, x, y):\n pts = np.column_stack([self.x, self.y])\n # Transform data coordinates to pixel coordinates.\n pts = self.ax.transData.transform(pts)\n diff = pts - [x, y]\n dist = np.hypot(*diff.T)\n min_index = np.argmin(dist)\n return min_index, dist[min_index]", "def search_matrix(to_search: int, a_matrix: list) -> (int, int):\n row = 0\n column = len(a_matrix[0]) - 1\n while row < len(a_matrix) and column >= 0:\n if to_search < a_matrix[row][column]:\n column -= 1\n elif to_search > a_matrix[row][column]:\n row += 1\n else:\n return row, column\n return -1, -1", "def closest_centroid(x,centroids):\n\tK =len(centroids)\n\tN = len(x)\n\tDistance = np.zeros((N,K))\n\tfor j in range(K):\n\t\tmu = centroids[j]\n\t\tDistance[:,j] = np.linalg.norm(x-mu,axis=1)\n\tout = np.argmin(Distance,axis=1) \n\treturn out" ]
[ "0.76846826", "0.7152678", "0.65159774", "0.6458269", "0.64570606", "0.6453487", "0.64491487", "0.6418172", "0.6344188", "0.6331945", "0.62991154", "0.62834746", "0.62771547", "0.62474954", "0.6215967", "0.6195537", "0.6191709", "0.6147389", "0.6129545", "0.6113139", "0.60623944", "0.606233", "0.6058435", "0.60123354", "0.5976111", "0.5973001", "0.5966138", "0.5958277", "0.59407294", "0.5921387", "0.5908705", "0.59075737", "0.5906178", "0.59027755", "0.5898496", "0.58922964", "0.58238596", "0.5817136", "0.58046806", "0.5794629", "0.5786032", "0.57790613", "0.5760545", "0.57487136", "0.5729835", "0.5727794", "0.57262546", "0.5723319", "0.57142335", "0.5699363", "0.56863594", "0.5669504", "0.5668976", "0.5659022", "0.5657898", "0.5656562", "0.5640429", "0.56398696", "0.56386065", "0.5635183", "0.56332725", "0.5628614", "0.5613239", "0.56091344", "0.5606968", "0.56068265", "0.56064546", "0.5605219", "0.5593027", "0.559182", "0.5589653", "0.5584512", "0.55826277", "0.5568779", "0.55642515", "0.5558048", "0.55453014", "0.5542451", "0.55262434", "0.5525691", "0.551633", "0.55148125", "0.55112", "0.5510455", "0.5500535", "0.5499876", "0.54814476", "0.5480102", "0.5476696", "0.54706466", "0.54668593", "0.54622704", "0.5459552", "0.5458548", "0.5455434", "0.54476726", "0.5441773", "0.5437488", "0.54363096", "0.5430931" ]
0.7358082
1
Each row of the matrix, say the jth row, represents the distances from the jth point to the other data points. This function returns the indexes of the points with the smallest distances with respect to each point represented by that row (by row, I mean the 0th dimension). Also notice that this function does not include the target particle itself, i.e. the diagonal elements of the matrix are set to zero.
def nearest_points_values_without_self(matrix, num_to_keep):

    # Set the diagonal to 0
    np.fill_diagonal(matrix, 0)
    # Get the position for the resulted values
    sort = np.sort(matrix, axis=1)

    return sort[:, : num_to_keep]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j", "def nearest_points_indexes_with_self(matrix, num_to_keep):\n\n # Set the diagonal to 1\n np.fill_diagonal(matrix, 1)\n # Get the position for the resulted values\n sort_arg = np.argsort(matrix, axis=1)\n\n return sort_arg[:, : num_to_keep]", "def nearest_points_indexes_without_self(matrix, num_to_keep):\n\n # Set the diagonal to 0\n np.fill_diagonal(matrix, 0)\n # Get the position for the resulted values\n sort_arg = np.argsort(matrix, axis=1)\n\n return sort_arg[:, : num_to_keep]", "def find_min_distance():\n return np.argmin(d)", "def smallest_distance(self, clusters):\n i, j = numpy.unravel_index(numpy.argmin(clusters), clusters.shape)\n return clusters[i, j], i, j", "def nearest_points_values_with_self(matrix, num_to_keep):\n\n # Set the diagonal to 1\n np.fill_diagonal(matrix, 1)\n # Get the position for the resulted values\n sort = np.sort(matrix, axis=1)\n\n return sort[:, : num_to_keep]", "def nearest_neighbour(matrix, start=0):\n path = [start]\n while len(matrix) != len(path):\n matrix[:, start] = numpy.inf\n start = numpy.argmin(matrix[start])\n path.append(start)\n return path", "def nearest_cluster(X,c):\n K = np.size(c,0)\n idx = np.zeros((np.size(X,0),1))\n arr = np.empty((np.size(X,0),1))\n for i in range(0,K):\n y = c[i]\n temp = np.ones((np.size(X,0),1))*y\n b = np.power(np.subtract(X,temp),2)\n a = np.sum(b,axis = 1)\n a.resize((np.size(X,0),1))\n arr = np.append(arr, a, axis=1)\n arr = np.delete(arr,0,axis=1)\n idx = np.argmin(arr, axis=1)\n return idx", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def _find_min_pair(pandas_matrix):\n numpy_matrix = pandas_matrix.values\n mins = np.where(numpy_matrix == np.nanmin(numpy_matrix))\n min_col_idx = mins[0][0]\n min_row_idx = mins[1][0]\n (min_col, min_row) = (pandas_matrix.index[min_col_idx], \n pandas_matrix.columns[min_row_idx])\n\n return (min_col, min_row)", "def find_closest_index(traj, point):\n\n\t#TODO: vectorise function to receive any length of points.\n\n\tdistances = np.subtract(np.array(point),traj) \n\tdistances = distances.reshape(-1,2)\n\t#distances = distances[~np.isnan(distances)].reshape(-1,2)\n\n\t#print(\"distances\")\n\t#pprint(distances)\n\tdist_array = np.linalg.norm(distances, axis = 1)\n\t#pprint(dist_array)\n\t#dist_array = np.sqrt((distances[:,0]**2)+(distances[:,1]**2)) #array of distances from trajectory to gaze landing point in world. \n\tidx = np.nanargmin(abs(dist_array)) #find smallest difference in pythag distance from 0,0 to get closest point. 
\n\tdists = distances[idx, :]\n\tdist = dist_array[idx]\n\n\treturn idx#, dists, dist\n\t#return idx", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def find_closest(distances, threshold):\n n = len(distances)\n person_1 = []\n person_2 = []\n d = []\n\n for i in range(n):\n for j in range(i+1, n):\n if distances[i][j] <= threshold:\n person_1.append(i)\n person_2.append(j)\n d.append(distances[i][j])\n\n return person_1, person_2, d", "def getRowHeuristics(matrix):\n row, col = matrix.shape\n rHeuristic = np.zeros((row,2)) # Dos columnas. La primera para indicar la columna la segunda para la Heuristica\n for i in range(0,row):\n rHeuristic[i,0] = int(i)\n #print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))\n rHeuristic[i,1] = 1/sum(matrix[i,:])\n return rHeuristic[rHeuristic[:,1].argsort()]", "def nearestNeighbor(self, coords, my_index, blacklist):\n min_index, min_distance = 0, 999999999\n my_col, my_row = coords[my_index]\n for i, (col, row) in enumerate(coords):\n if i in blacklist:\n continue\n distance = math.sqrt((my_col-col)**2 + (my_row-row)**2)\n if distance < min_distance:\n min_index, min_distance = i, distance\n return min_index", "def _nearest_cluster_distance(distances_row, labels, i):\n label = labels[i]\n b = np.min([np.mean(distances_row[labels == cur_label])\n for cur_label in set(labels) if not cur_label == label])\n return b", "def find_closest_trajectory_pose(self):\n np_state = numpy.array([[self.x], [self.y]])\n temp_distance = numpy.sum(\n (self.np_trajectory[0:2, :] - np_state) ** 2,\n axis=0)\n best_idx = numpy.argmin(temp_distance)\n return best_idx", "def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists", "def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()", "def _first_index_with_smaller_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] <= P[i]:\n i -= 1\n return i", "def saddle_points(matrix):\n if not all(len(row) == len(matrix[0]) for row in matrix[1:]):\n raise ValueError('Provided matrix is irregular.')\n columns = [col for col in zip(*matrix)]\n points = set()\n for ridx, row in enumerate(matrix):\n for cidx, element in enumerate(row):\n if element == max(row) and element == min(columns[cidx]):\n points.add((ridx, cidx))\n return points", "def _compute_euclidean_neigh_matrix(src, d_matrix, radius):\n\n n_max = 100\n n_min = 3\n reached_points = 
np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")", "def min_distance_vertex(distance, visited):\n vertices = len(visited)\n min_distance = INF\n min_index = None\n for v in range(vertices):\n if not visited[v] and distance[v] <= min_distance:\n min_distance = distance[v]\n min_index = v\n return min_index", "def shortestDistance(self, grid):\n # return self.house_oriented_TLE(grid)\n # One axis\n row_count = [sum(row) for row in grid]\n col_count = [0]* len(grid[0])\n row_dist = [0]* len(grid)\n col_dist = [0]* len(grid[0])\n output = sys.maxsize\n for i in range(len(grid)): \n for j in range(len(grid[0])):\n col_count[j] += grid[i][j]\n \n for index_p in range(len(row_count)):\n for index_h in range(len(row_count)):\n row_dist[index_p] += abs(index_h - index_p) * row_count[index_h]\n \n for index_p in range(len(col_count)):\n for index_h in range(len(col_count)):\n col_dist[index_p] += abs(index_h - index_p) * col_count[index_h]\n \n # print(row_count)\n # print(col_count)\n # print(row_dist)\n # print(col_dist)\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n continue\n output = min(output, row_dist[i] + col_dist[j])\n return output", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def find_min_point(points):\r\n smallest_point_index = 0\r\n for i in range(1, len(points)):\r\n if points[i][1] < points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n elif points[i][0] > points[smallest_point_index][0] and points[i][1] == 
points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n return smallest_point_index", "def _findMin(p, A):\n\n m=(-1, (0,0))\n for p0 in A:\n dist = np.linalg.norm(p0-np.array(p))\n if m[0]==-1 or m[0]>dist:\n m = (dist, p0)\n \n return tuple(m[1])", "def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def get_idx(lons, lats, lon, lat):\n dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5\n return np.unravel_index(dist.argmin(), dist.shape)", "def get_min_dist(x0, y0, arr):\n dist = np.hypot(arr.T[0] - x0, arr.T[1] - y0)\n min_dist = np.min(dist)\n val = np.argmin(dist)\n return min_dist, arr[val]", "def dist(dm, sm, neighbors):\n\n # Initialize list of possible distances\n distances = []\n\n # loop over all neighbors of the cell\n for neighbor in neighbors:\n # If the neighbor is valid\n if dm[neighbor[0], neighbor[1]] != -1:\n # add neighbor distance + 1 to possible distances\n distances.append(dm[neighbor[0], neighbor[1]] + 1)\n\n # return minimal distance\n return np.min(distances)", "def nodes_min_energy_index(self, node):\n idx = -1\n curr_energy = np.inf\n for i in range(self.cost_matrix.shape[1]):\n new_energy = self.cost_matrix[node][i]\n if new_energy < curr_energy:\n curr_energy = new_energy\n idx = i\n return idx", "def pair_idx(rows, comm=None):\n raise Exception(\"Not implemented\")\n \n if comm == None:\n comm = MPI.COMM_WORLD\n \n total = comb(rows,2,exact=True)\n size = comm.Get_size()\n \n size = 1000\n \n print(total / size)\n \n target = total / size\n \n current_row = 0\n calc_list = []\n row_list = [[] for x in range(size)]\n for rank in range(size):\n row_list[rank].append(current_row)\n \n current_calcs = 0\n \n for value in range(current_row, rows):\n current_calcs += value\n if current_calcs > target:\n if rank == size-1:\n pass\n else:\n break\n \n calc_list.append(current_calcs)\n row_list[rank].append(value)\n current_row = value\n \n return row_list,calc_list", "def get_maxmin_index_from_row(\n distance_matrix: np.ndarray,\n row: int,\n previous_indexes: List,\n type: str,\n )-> int:\n distance_matrix = distance_matrix.copy()\n arr = distance_matrix[row].astype(float)\n \n aux_list = range(arr.shape[0])\n aux_list_2 = []\n for i in aux_list:\n if i in previous_indexes:\n aux_list_2.append(True)\n else:\n aux_list_2.append(False)\n previous_indexes_bool = aux_list_2\n \n if type == 'max':\n arr[previous_indexes_bool] = -1\n target_index = np.argmax(arr)\n if type == 'min':\n arr[previous_indexes_bool] = np.Inf\n target_index = np.argmin(arr)\n \n return target_index", "def get_nearest_atom_inds_per_mol(self):\n self.closest_at_per_mol = np.zeros((self.nmol,\n self.at_per_mol,\n self.at_per_mol-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.at_per_mol)\n for imol in range(self.nmol):\n for iat in range(self.at_per_mol):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist_per_mol[imol, iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_at_per_mol[imol, iat] = at_inds", "def distance_matrix(n_row, n_col):\n\n n_pop = int(n_row * n_col)\n center = int(n_row/2*(n_col+1))\n\n pop_idx = np.arange(n_pop)\n pop_idx_col = np.remainder(pop_idx, n_col)\n pop_idx_row = pop_idx // n_row\n\n pos = np.vstack((pop_idx_col,pop_idx_row)).T\n distance = spa.distance.cdist([pos[center]], pos)[0]\n\n return distance", "def find_min(self, A, w):\n import numpy as np\n\n vcost = self.INFINITY\n vto = vfrom = -1\n for v in w:\n # Get array 
offset of minimum of this vertex\n i = np.argmin(A[v,:])\n if A[v,i] < vcost:\n vcost = A[v,i]\n vto = i\n vfrom = v\n return (vfrom, vto, vcost)", "def find_nearest_neighbours_SURF(averageDistance,inst,distanceArray,maxInst): \r\n NN=[]\r\n min_indices=[] \r\n\r\n for j in range(maxInst):\r\n if inst != j:\r\n locator = [inst,j]\r\n locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy)\r\n d = distanceArray[locator[0]][locator[1]]\r\n if d<averageDistance:\r\n min_indices.append(j)\r\n \r\n for j in range(len(min_indices)):\r\n NN.append(min_indices[j])\r\n \r\n return NN", "def min(weightData , dataSetVector ):\r\n # weightData: pass the whole weightData array.\r\n # dataSetVector: pass the a data vector to compare with weightdata array, to find its closest match\r\n winnerIndex = 0 #flag for initalizing the winner index\r\n minValue = EcuDist(dataSetVector,weightData[0]) # initalize the minValue\r\n # iterate through all weighdata rows to find the closest match, depending on ecu. distance,\r\n #and then return the index of the closest match(winner)\r\n for i in range(weightData.shape[0]):\r\n if(EcuDist(dataSetVector,weightData[i]) < minValue):\r\n minValue = EcuDist(dataSetVector,weightData[i])\r\n winnerIndex = i\r\n return winnerIndex", "def get_nearest_atom_inds(self):\n # Create empty data structure\n self.closest_ats = np.zeros((self.natom, self.natom-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.natom)\n for iat in range(self.natom):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist[iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_ats[iat] = at_inds", "def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]", "def update_distmatrix(min_idx, dist_matrix):\n i, j = min_idx\n new_cluster_dist = np.minimum(dist_matrix[i, :], dist_matrix[j, :])\n new_cluster_dist[i] = np.inf\n \n dist_matrix[i, :] = new_cluster_dist\n dist_matrix[:, i] = new_cluster_dist\n \n dist_matrix[j, :] = np.inf\n dist_matrix[:, j] = np.inf\n \n return dist_matrix", "def getNeighbors(training_data, test_row, k):\n\n distances = list()\n for training_row in training_data:\n dist = euclidianDistance(training_row, test_row)\n distances.append([training_row, dist])\n \n #Sort on the basis of dist\n distances.sort(key=lambda row:row[1])\n\n neighbors = list()\n\n for i in range(int(k)):\n neighbors.append(distances[i][0])\n\n return neighbors", "def get_nearest_node_index(node_list, random_node):\n\n dist_list = [\n (node.x - random_node.x) ** 2 + (node.y - random_node.y) ** 2\n for node in node_list\n ]\n minind = dist_list.index(min(dist_list))\n\n return minind", "def findSmallest(distancesWithNames):\n smallest = distancesWithNames[0][2]\n smallestIndex = -1\n for i in range(len(distancesWithNames)):\n if smallest >= distancesWithNames[i][2]:\n smallest = distancesWithNames[i][2]\n smallestIndex = i\n return smallestIndex", "def closestCluster(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n\treturn bestIndex", "def smallest_cert(pts):\n # 1. 
project the points\n def projected_flattened_pt(pt):\n mag = distance(pt)\n distance_wanted = rect_dist\n return distance_wanted/mag*pt[1], \\\n distance_wanted/mag*pt[2]\n\n flat_pts = np.array([[projected_flattened_pt(pt), pt] for pt in pts])\n\n # 2. identify rows of data\n data = {} \n # maps height (from ground) to the \"row\" (scan) list of\n # horizontal-only (1D) data\n for pt, origpt in flat_pts:\n for row_height in data:\n if abs(pt[1]-row_height) < dist_bt_rows/2:\n data[row_height].append([pt[0], origpt])\n break\n else:\n data[pt[1]] = [[pt[0], origpt]]\n\n #3. build up final_data so each row has enough point x-density\n def get_left_i(pts, left_edge):\n # get last index <= left_edge\n for i, pt in enumerate(pts):\n if pt[0] > left_edge:\n return i - 1\n\n def get_right_i(pts, right_edge):\n # get first index >= right_edge\n i = len(pts)-1\n while True:\n pt = pts[i]\n if pt[0] < right_edge:\n return i + 1\n i -= 1\n\n final_data = []\n for _, subdata in data.items():\n subdata = sorted(subdata, key=lambda t: t[0])\n i = get_left_i(subdata, lane_left)\n end = get_right_i(subdata, lane_right)\n\n final_data = [subdata[i][1]]\n while i < end:\n next_i = get_left_i(subdata[i:], \n subdata[i][0]+max_xpt_separation)\n if not next_i:\n return False\n i += next_i\n final_data.append(subdata[i][1])\n\n return final_data", "def get_min_shannon_entropy(grid):\r\n curr_min = math.inf\r\n curr_best = []\r\n for i in range(len(grid[0])):\r\n for j in range(len(grid)):\r\n if not grid[j][i].collapsed:\r\n w = grid[j][i].block_weights\r\n shannon_entropy = sum([-math.log(el) for el in w] )\r\n if shannon_entropy < curr_min:\r\n curr_min = shannon_entropy\r\n curr_best = [(i,j)]\r\n elif shannon_entropy == curr_min:\r\n curr_best.append((i,j))\r\n idx = np.random.choice(range(len(curr_best))) #choose randomly if theres a tie\r\n return curr_best[idx] #x,y\r", "def detec_mfo_dist(betas):\n k = len(betas)\n min_dist = np.inf\n for i in range(k-1):\n for j in range(i+1,k):\n distance = np.sum((betas[i,:] - betas[j,:])**2)\n if distance < min_dist:\n MFO_index = [i,j]\n min_dist = distance\n return MFO_index", "def _lowestDistanceToCluster(self, clusters: ndarray, sequenceIdx: int) -> Tuple[int, float]:\n lowestClusterIdx = -1\n lowestDistance = np.inf\n for cIdx in range(self.numClusters):\n distance = self._distanceToCluster(clusters[cIdx], sequenceIdx)\n if distance < lowestDistance:\n lowestClusterIdx = cIdx\n lowestDistance = distance\n return lowestClusterIdx, lowestDistance", "def eeg_findnearest(x,X):\t\n\t#x array or vector and X a scalar\n\tabsdif = np.abs(x-X)\n\tval = np.min(absdif)\n\tidx = absdif.argmin()\n\treturn val,idx", "def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx", "def _kth_nearest_neighbor_dist(\n distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix], k\n) -> np.ndarray:\n\n if not is_integer(k):\n raise ValueError(f\"parameter 'k={k}' must be a positive integer\")\n else:\n # make sure we deal with Python built-in\n k = int(k)\n\n if not (0 <= k <= distance_matrix.shape[1]):\n raise ValueError(\n \"'k' must be an integer between 1 and \"\n f\"distance_matrix.shape[1]={distance_matrix.shape[1]}\"\n )\n\n if isinstance(distance_matrix, np.ndarray):\n dist_knn = 
np.partition(distance_matrix, k - 1, axis=1)[:, k - 1]\n elif isinstance(distance_matrix, scipy.sparse.csr_matrix):\n # see mircobenchmark_kth_nn.py for a comparison of implementations for the\n # sparse case\n\n def _get_kth_largest_elements_sparse(\n data: np.ndarray,\n indptr: np.ndarray,\n row_nnz,\n k_neighbor: int,\n ):\n dist_knn = np.zeros(len(row_nnz))\n for i in range(len(row_nnz)):\n start_row = indptr[i]\n dist_knn[i] = np.partition(\n data[start_row : start_row + row_nnz[i]], k_neighbor - 1\n )[k_neighbor - 1]\n\n return dist_knn\n\n row_nnz = distance_matrix.getnnz(axis=1)\n\n if (row_nnz < k).any():\n raise ValueError(\n f\"There are {(row_nnz < k).sum()} points that \"\n f\"do not have at least k_neighbor={k}.\"\n )\n\n dist_knn = _get_kth_largest_elements_sparse(\n distance_matrix.data,\n distance_matrix.indptr,\n row_nnz,\n k,\n )\n else:\n raise TypeError(f\"type {type(distance_matrix)} not supported\")\n\n return dist_knn", "def closestCentroids(self, points , centroids ):\n dists = scipy.spatial.distance.cdist(points,centroids)\n # 1 is dimension\n minIds = numpy.argmin(dists, 1)\n return minIds", "def closestClusterAndDistance(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n return (bestIndex, closest)", "def get_nearest_indices(vector, matrix, num=5):\n if len(matrix) < num:\n num = len(matrix)\n\n m, n = matrix.shape\n diff_matrix = np.tile(vector, (m, 1)) - matrix\n diff_matrix = abs(diff_matrix)\n\n distance = diff_matrix.sum(axis=1)\n sortIndices = np.argsort(distance)\n sortIndices = sortIndices[0:num]\n\n return sortIndices[random.randint(0, num - 1)]", "def exhaustive_search(X, z):\n #initialize shortest path and distance to the first row\n d_star = la.norm(z - X[0])\n x_star = X[0]\n #check the distance from each row of X and if shortest thus far, save it\n for i in range(X.shape[0]):\n #calculate distance from current row to target z\n x = X[i,:]\n cur_dist = la.norm(x - z)\n #if the distance is smallest thus far, save the row and distance\n if cur_dist < d_star:\n x_star = x\n d_star = cur_dist\n return x_star, d_star", "def find_min_path(s, t, dist):\n\n rows = len(dist) - 1\n cols = len(dist[0]) - 1\n col = cols\n row = rows\n pos_str = \"Position: (row={} col={}) -> (row={} col={})\"\n cst_str = \"Cost: {}\"\n prev_row = row\n prev_col = col\n\n # init sparse path matrix\n sparse_path = [[\" \" for x in range(cols + 1)] for x in range(rows + 1)]\n sparse_path[0][0] = \"0\"\n\n # start with operation at (rows, cols) and work backwards\n sparse_path[rows][cols] = dist[rows][cols]\n\n if verbose == 2:\n print()\n print(\"Initial Minimum Path Matrix:\")\n print_matrix(s, t, sparse_path)\n\n while True:\n\n # bail out if we are in the corner\n if row == 0 and col == 0:\n break\n\n # if we are not at a matrix boundary\n if row != 0 and col != 0: # if at left edge or top row, cannot move diagonally\n\n # diagonal\n if (dist[row - 1][col - 1] == min(dist[row - 1][col],\n dist[row][col - 1],\n dist[row - 1][col - 1])) and (dist[row - 1][col - 1] == dist[row][col] or dist[row - 1][col - 1] == dist[row][col] - 1):\n sparse_path[row - 1][col - 1] = dist[row - 1][col - 1]\n temp_cost = dist[row - 1][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), 
str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # left\n elif dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # above\n else:\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # if at matrix edge, can only move up\n elif col == 0:\n # above\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # must be at row boundary, can only move left\n else:\n # left\n if dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # print matrix\n if verbose == 2:\n print_matrix(s, t, sparse_path)\n\n return sparse_path", "def find_closest_atom(coords1, coords2):\n\n coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def find_neighbor_indices(atoms, probe, k):\r\n coords_all = atoms.getCoords()\r\n neighbor_indices = []\r\n atom_k = atoms[k]\r\n radius = atom_k.getRadius() + probe + probe\r\n\r\n indices = range(k)\r\n indices.extend(range(k+1, len(atoms)))\r\n\r\n for i in indices:\r\n\tdist = pos_distance(coords_all[k], coords_all[i])\r\n\t#dist = np.linalg.norm(coords_all[k] - coords_all[i])\r\n\tif dist < radius + atoms[i].getRadius():\r\n neighbor_indices.append(i)\r\n\r\n return neighbor_indices", "def djikstre(connection_mat):\n n = connection_mat.shape[0]\n dist, prev = {}, {}\n Q = list(range(n))\n \n for i in Q:\n dist[i] = np.inf\n dist[n-2] = 0.0\n \n while(len(Q)>0):\n\n min_dist = min([dist[key] for key in Q])\n u = [key for key in Q if dist[key] == min_dist][0]\n Q.remove(u)\n\n for v in np.nonzero(connection_mat[:, u])[0]:\n \n alt = dist[u]+connection_mat[v, u]\n \n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n \n return dist, prev", "def _first_index_with_bigger_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] >= P[i]:\n i -= 1\n return i", "def neirest_neighbour(business, cells):\n array = cells.get_neighbours(business, num=1)\n neighbours = pd.DataFrame(array).set_index('index')\n index = neighbours['distance'].idxmin()\n return neighbours.loc[index]", "def _retrieve_neighbors(df, i_point, point, eps, column):\n neigborhood = []\n for index, row in df.iterrows():\n if index != i_point:\n a = np.array(point[column])\n b = np.array([row[column]])\n distance = np.linalg.norm(a - b)\n if distance <= eps:\n neigborhood.append(index)\n\n return neigborhood", "def find_nearest_neighbors(p, points, k):\n\timport numpy as np\n\tdistances = 
np.zeros(points.shape[0])\n\tfor i in range(len(distances)):\n\t\tdistances[i] = distance(p,points[i])\n\tind = np.argsort(distances)\n\treturn ind[0:k]", "def get_nearest_row(self):\n return (self.rect.top - (self.screen.get_height() // 12)) // self.maze.block_size", "def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass", "def _nearest_point_index(points, point):\n distance = sys.float_info.max\n index = None\n for i, p in enumerate(points):\n temp = _vec_distance(p, point)\n if temp < distance:\n distance = temp\n index = i\n return index, distance", "def nearest_neighbor(data):\n features = set([i for i, x in enumerate(data[0][1])])\n return leave_one_out_cross_validation(data, features)", "def shortestpathij(self, i, j):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for i in range(len(pathlist)):\n Temp = 0\n for j in range(len(pathlist[i]) - 1):\n Temp += self.Dismatrix[pathlist[i][j], pathlist[i][j+1]]\n distance.append(Temp)\n \n if(len(distance) == 0):\n return None\n else:\n return min(distance)", "def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def closest_node(node, nodes):\n nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum(\"ij,ij->i\", deltas, deltas)\n return np.argmin(dist_2), np.min(dist_2)", "def extract_min(H, ds):\n minDist = approxInf\n u = None # min vertex unknown\n i = 0\n for v in H:\n if ds[v] <= minDist:\n minDist = ds[v]\n u = v # note that u is unused (instead returned by pop)\n imin = i\n i += 1\n return(H.pop(imin)) # return [u, d]", "def min_distance(s1, s2):\n n = len(s1)\n m = len(s2)\n matrix = [([0]*(m+1)) for i in xrange(n+1)]\n for i in xrange(m+1):\n matrix[0][i] = i\n for i in xrange(n+1):\n matrix[i][0] = i\n for i in xrange(1,n+1):\n for j in xrange(1,m+1):\n temp = min(matrix[i-1][j]+1, matrix[i][j-1]+1)\n d = 0 if s1[i-1]==s2[j-1] else 1\n matrix[i][j] = min(temp, matrix[i-1][j-1]+d)\n return matrix[n][m]", "def getNearestLineIndex(row, tagLineNumbers):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # initialize local auxiliary variables {{{\n nearestLineNumber = -1\n nearestLineIndex = -1\n # }}}\n\n # go through all tag line numbers and find the one nearest to the specified row {{{\n for lineIndex, lineNumber in enumerate(tagLineNumbers):\n # if the current line is nearer the current cursor position, take it {{{\n if (nearestLineNumber < lineNumber <= row):\n nearestLineNumber = lineNumber\n nearestLineIndex = lineIndex\n # }}}\n\n # if we've got past the current cursor position, let's end the search {{{\n if (lineNumber >= row):\n break\n # }}}\n # }}}\n\n # return index of the line with the nearest tag\n return nearestLineIndex\n # }}}", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def _low_tri_indices(rowCount):\n for col in range(rowCount):\n for row in range(col, rowCount):\n yield (row, col)", "def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]", "def 
min_horizontal_dist_meters(coords, targets, is_geo=False):\n xe = coords[:, 0]\n ye = coords[:, 1]\n n = len(xe)\n d = np.zeros(n)\n for j in range(n):\n d1 = dist_in_meters(targets, [xe[j], ye[j]], is_geo=is_geo)\n d[j] = d1.min()\n return d", "def findminpath(tab, gxtab, gytab, pixtab):\n\n pathdist = 2 # the number of points each points on a ray can related to on the previous ray\n pathdist_penalty = 0.3 # penalty of the difference of the pathdist\n pathpix_penalty = 2 # penalty of the difference of pixel values between the point and the previous point\n nray = tab.shape[1]\n\n #tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))\n #pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))\n #gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))\n #gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))\n\n tab = np.hstack((tab,tab,tab)) # horizontally stack the tab matrix to prepare for the filtering on the result\n pixtab = np.hstack((pixtab,pixtab,pixtab))\n gxtab = np.hstack((gxtab,gxtab,gxtab))\n gytab = np.hstack((gytab,gytab,gytab))\n\n tab = (tab - tab.min()) / (tab.max() - tab.min()) # noralize the tab matrix\n pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # for we want to find the white contour of the cell so we multipy -1 on the pixtab\n # tab = tab / np.median(tab)\n # pixtab = pixtab / np.median(pixtab)\n path = np.zeros(tab.shape)\n path[:, 0] = np.array(range(0, tab.shape[0]))\n score = np.zeros(tab.shape)\n score[:, 1] = tab[:, 1]\n\n for i in range(1, tab.shape[1]):\n for j in range(tab.shape[0]):\n mins = np.Inf # record the min value of the ray\n minat = 0\n for k in range(-pathdist, pathdist+1):\n if(0 <= (j+k) and (j+k) < tab.shape[0]):\n s = pixtab[j, i]\n pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])\n s += pixdiff * pathpix_penalty # two kinds of penalty\n s += abs(k) * pathdist_penalty\n s += score[j+k, i-1]\n\n if(s < mins):\n mins = s\n minat = j + k\n path[j, i] = minat\n score[j, i]= mins\n\n start = int(np.argmin(score[:, -1]))\n path = path.astype(np.int32)\n minpath = [start]\n for i in range(tab.shape[1]-1, 0, -1):\n minpath.append(path[minpath[-1], i])\n minpath = minpath[::-1]\n # print(len(minpath))\n minpath = savgol_filter(minpath, 15, 3) # apply a Savitzky-Golay filter to the raw minpath signal\n minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray\n return np.array(minpath).astype(np.int32)", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def _intra_cluster_distance(distances_row, labels, i):\n mask = labels == labels[i]\n mask[i] = False\n if not np.any(mask):\n # cluster of size 1\n return 0\n a = np.mean(distances_row[mask])\n return a", "def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n 
min_dist = dist\n min_index = index\n return min_index", "def __find_immediate_predecessors(storm_object_table, target_row):\n\n error_checking.assert_is_integer(target_row)\n error_checking.assert_is_geq(target_row, 0)\n error_checking.assert_is_less_than(\n target_row, len(storm_object_table.index)\n )\n\n predecessor_sec_id_strings = [\n storm_object_table[c].values[target_row]\n for c in PREV_SECONDARY_ID_COLUMNS\n if storm_object_table[c].values[target_row] != ''\n ]\n\n num_predecessors = len(predecessor_sec_id_strings)\n if num_predecessors == 0:\n return numpy.array([], dtype=int)\n\n target_time_unix_sec = storm_object_table[\n tracking_utils.VALID_TIME_COLUMN].values[target_row]\n\n predecessor_rows = numpy.full(num_predecessors, -1, dtype=int)\n\n for i in range(num_predecessors):\n these_rows = numpy.where(numpy.logical_and(\n storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values ==\n predecessor_sec_id_strings[i],\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values <\n target_time_unix_sec\n ))[0]\n\n if len(these_rows) == 0:\n continue\n\n this_subrow = numpy.argmax(\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[\n these_rows]\n )\n\n predecessor_rows[i] = these_rows[this_subrow]\n\n return predecessor_rows[predecessor_rows >= 0]", "def matrix_idx(n_hist, n_req, n_rows):\n\n flat_idx = []\n for i in range(n_rows):\n flat_idx.extend(range(i * n_req, (i + 1) * n_req + n_hist))\n # idx = np.unravel_index(flat_idx, (n_rows, n_hist + n_req))\n\n idx_matrix = np.reshape(flat_idx, (n_rows, n_hist + n_req))\n idxX = idx_matrix[:, n_req:]\n idxY = idx_matrix[:, :n_req]\n\n return idxX, idxY", "def _partition_nearest(self, medoids, dists, only_these=set()):\n if len(only_these) == 0:\n allowed_inds = self._not_ignored_inds\n else:\n allowed_inds = self._not_ignored_inds & only_these\n closest_medoid_ind = np.argmin(dists[:,medoids], 1) # If len(medoids)==3, would look like [2,1,1,0,1,2,...].\n clusts = [[] for i in medoids]\n for node_ind, med_ind in enumerate(closest_medoid_ind):\n if node_ind in allowed_inds:\n clusts[med_ind].append(node_ind)\n return clusts", "def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj", "def index_condensed_matrix(n, i, j):\n if i == j:\n main_warning(\"Diagonal elements (i=j) are not stored in condensed matrices.\")\n return None\n elif i > j:\n i, j = j, i\n return int(i * (n - (i + 3) * 0.5) + j - 1)", "def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the 
sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)", "def closest_centroid(x,centroids):\n\tK =len(centroids)\n\tN = len(x)\n\tDistance = np.zeros((N,K))\n\tfor j in range(K):\n\t\tmu = centroids[j]\n\t\tDistance[:,j] = np.linalg.norm(x-mu,axis=1)\n\tout = np.argmin(Distance,axis=1) \n\treturn out", "def medoid_ft(ftvectors,pairwise_dist_matrix):\r\n N = len(ftvectors)\r\n if N == 1 : \r\n return 0 \r\n # pairwise_dist_matrix = calculate_dist_matrix_ft(ftvectors) \r\n sum_dist = np.sum(pairwise_dist_matrix, axis = 0)\r\n min_idx = np.argmin(sum_dist)\r\n return min_idx", "def nn_dists(coords, spherical=True):\n\n full_dist_matrix = all_dists(coords, spherical)\n\n # find all minimum distances\n # apply min over ranges of the dist array\n min_dists = np.min(full_dist_matrix, axis=1)\n min_ix = np.argmin(full_dist_matrix, axis=1)\n\n return min_dists, min_ix", "def find_indices(colorhs, centres):\n\n indices = np.zeros(colorhs.shape[0], dtype=np.uint8)\n i = 0\n\n for hs in colorhs:\n # Past Euclidian distance\n past_ed = float(\"inf\")\n for cluster in range(centres.shape[0]):\n # Current Euclidian distance\n curr_ed = (sum((hs - centres[cluster, :]) ** 2)) ** 1/2\n # A frame belongs to the cluster with the minimum ed value.\n if curr_ed <= past_ed:\n past_ed = curr_ed\n indices[i] = cluster\n i += 1\n return indices", "def minesweeper(matrix):\n \n num_rows = len(matrix)\n num_cols = len(matrix[0])\n \n adj_mines = []\n \n adj_row = [0]*num_cols\n \n for i in range(num_rows):\n adj_mines.append(adj_row[:])\n \n for r in range(num_rows):\n for c in range(num_cols):\n if matrix[r][c] == True:\n if (r-1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r-1][c-1] += 1\n if (r-1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r-1][c] += 1\n if (r-1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r-1][c+1] += 1\n if (r) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r][c-1] += 1\n if (r) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r][c+1] += 1\n if (r+1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r+1][c-1] += 1\n if (r+1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r+1][c] += 1\n if (r+1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r+1][c+1] += 1\n\n \n return adj_mines", "def findClosetCentroids(X, centroids):\n\tm, n = X.shape\n\tK = centroids.shape[0]\n\tidx = np.zeros(m) # m\n\n\tfor i in range(m):\n\t\ttemp = np.tile(X[i, :], K).reshape(centroids.shape)\n\t\tidx[i] = np.argmin(np.sum((centroids - temp) ** 2, axis=1))\n\treturn idx", "def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices" ]
[ "0.7677283", "0.7389674", "0.7215832", "0.65018827", "0.64853066", "0.6480093", "0.64236856", "0.6417016", "0.64140975", "0.6329937", "0.63131666", "0.63109416", "0.63027555", "0.62869954", "0.6206853", "0.6192381", "0.6176218", "0.61489695", "0.6099502", "0.60955817", "0.60761005", "0.60444117", "0.60416424", "0.60360634", "0.59951365", "0.5976779", "0.597576", "0.593282", "0.59230864", "0.59188306", "0.5918303", "0.5917492", "0.5910718", "0.59076756", "0.59005046", "0.587552", "0.5851686", "0.5837209", "0.5800773", "0.5792634", "0.5790263", "0.57880366", "0.5767146", "0.57514036", "0.57398194", "0.5733956", "0.5733956", "0.57217693", "0.5700117", "0.5699564", "0.5695812", "0.5682173", "0.56744385", "0.56577855", "0.56558686", "0.56555545", "0.5653263", "0.5648361", "0.56477726", "0.5647009", "0.563409", "0.56335485", "0.5629854", "0.5622456", "0.5620252", "0.5609443", "0.5603943", "0.5598129", "0.5596091", "0.55842704", "0.55797595", "0.55738246", "0.55701625", "0.5558328", "0.55554163", "0.55529463", "0.55517614", "0.55496764", "0.5549648", "0.55457914", "0.55318844", "0.551883", "0.55082893", "0.55014145", "0.5501182", "0.5492334", "0.54880804", "0.5487481", "0.5485643", "0.54840255", "0.54826933", "0.5478958", "0.54749215", "0.5461066", "0.5455318", "0.54510504", "0.5439225", "0.54364276", "0.54334986", "0.5426398" ]
0.62730277
14
Each row of the matrix, let's say the jth row, represents the distances between the other data points and the jth point. This function returns the indexes of the points with the smallest distances with respect to each point represented by that specified row. By row, I mean the 0th dimension. Also notice that this function includes the target particle itself, i.e. the diagonal elements of the matrix are set to 1.
def nearest_points_values_with_self(matrix, num_to_keep):
    # Set the diagonal to 1
    np.fill_diagonal(matrix, 1)
    # Get the position for the resulted values
    sort = np.sort(matrix, axis=1)
    return sort[:, : num_to_keep]
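A minimal usage sketch (added for illustration, not part of the record; the random point set, sizes, and variable names are assumptions): build a symmetric pairwise distance matrix, then keep the smallest per-row values. Note that the helper mutates its input by writing 1 on the diagonal before sorting.

import numpy as np

points = np.random.rand(5, 3)                        # 5 assumed points in 3-D
diff = points[:, None, :] - points[None, :, :]
dist = np.sqrt((diff ** 2).sum(axis=-1))             # 5 x 5 pairwise distance matrix
vals = nearest_points_values_with_self(dist, num_to_keep=3)
print(vals.shape)                                    # (5, 3): the 3 smallest entries of each row (diagonal forced to 1)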
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j", "def nearest_points_indexes_with_self(matrix, num_to_keep):\n\n # Set the diagonal to 1\n np.fill_diagonal(matrix, 1)\n # Get the position for the resulted values\n sort_arg = np.argsort(matrix, axis=1)\n\n return sort_arg[:, : num_to_keep]", "def nearest_points_indexes_without_self(matrix, num_to_keep):\n\n # Set the diagonal to 0\n np.fill_diagonal(matrix, 0)\n # Get the position for the resulted values\n sort_arg = np.argsort(matrix, axis=1)\n\n return sort_arg[:, : num_to_keep]", "def find_min_distance():\n return np.argmin(d)", "def nearest_neighbour(matrix, start=0):\n path = [start]\n while len(matrix) != len(path):\n matrix[:, start] = numpy.inf\n start = numpy.argmin(matrix[start])\n path.append(start)\n return path", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def smallest_distance(self, clusters):\n i, j = numpy.unravel_index(numpy.argmin(clusters), clusters.shape)\n return clusters[i, j], i, j", "def nearest_cluster(X,c):\n K = np.size(c,0)\n idx = np.zeros((np.size(X,0),1))\n arr = np.empty((np.size(X,0),1))\n for i in range(0,K):\n y = c[i]\n temp = np.ones((np.size(X,0),1))*y\n b = np.power(np.subtract(X,temp),2)\n a = np.sum(b,axis = 1)\n a.resize((np.size(X,0),1))\n arr = np.append(arr, a, axis=1)\n arr = np.delete(arr,0,axis=1)\n idx = np.argmin(arr, axis=1)\n return idx", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def find_closest_index(traj, point):\n\n\t#TODO: vectorise function to receive any length of points.\n\n\tdistances = np.subtract(np.array(point),traj) \n\tdistances = distances.reshape(-1,2)\n\t#distances = distances[~np.isnan(distances)].reshape(-1,2)\n\n\t#print(\"distances\")\n\t#pprint(distances)\n\tdist_array = np.linalg.norm(distances, axis = 1)\n\t#pprint(dist_array)\n\t#dist_array = np.sqrt((distances[:,0]**2)+(distances[:,1]**2)) #array of distances from trajectory to gaze landing point in world. \n\tidx = np.nanargmin(abs(dist_array)) #find smallest difference in pythag distance from 0,0 to get closest point. 
\n\tdists = distances[idx, :]\n\tdist = dist_array[idx]\n\n\treturn idx#, dists, dist\n\t#return idx", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def _find_min_pair(pandas_matrix):\n numpy_matrix = pandas_matrix.values\n mins = np.where(numpy_matrix == np.nanmin(numpy_matrix))\n min_col_idx = mins[0][0]\n min_row_idx = mins[1][0]\n (min_col, min_row) = (pandas_matrix.index[min_col_idx], \n pandas_matrix.columns[min_row_idx])\n\n return (min_col, min_row)", "def find_closest(distances, threshold):\n n = len(distances)\n person_1 = []\n person_2 = []\n d = []\n\n for i in range(n):\n for j in range(i+1, n):\n if distances[i][j] <= threshold:\n person_1.append(i)\n person_2.append(j)\n d.append(distances[i][j])\n\n return person_1, person_2, d", "def nearest_points_values_without_self(matrix, num_to_keep):\n\n # Set the diagonal to 0\n np.fill_diagonal(matrix, 0)\n # Get the position for the resulted values\n sort = np.sort(matrix, axis=1)\n\n return sort[:, : num_to_keep]", "def getRowHeuristics(matrix):\n row, col = matrix.shape\n rHeuristic = np.zeros((row,2)) # Dos columnas. La primera para indicar la columna la segunda para la Heuristica\n for i in range(0,row):\n rHeuristic[i,0] = int(i)\n #print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))\n rHeuristic[i,1] = 1/sum(matrix[i,:])\n return rHeuristic[rHeuristic[:,1].argsort()]", "def nearestNeighbor(self, coords, my_index, blacklist):\n min_index, min_distance = 0, 999999999\n my_col, my_row = coords[my_index]\n for i, (col, row) in enumerate(coords):\n if i in blacklist:\n continue\n distance = math.sqrt((my_col-col)**2 + (my_row-row)**2)\n if distance < min_distance:\n min_index, min_distance = i, distance\n return min_index", "def _nearest_cluster_distance(distances_row, labels, i):\n label = labels[i]\n b = np.min([np.mean(distances_row[labels == cur_label])\n for cur_label in set(labels) if not cur_label == label])\n return b", "def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()", "def find_closest_trajectory_pose(self):\n np_state = numpy.array([[self.x], [self.y]])\n temp_distance = numpy.sum(\n (self.np_trajectory[0:2, :] - np_state) ** 2,\n axis=0)\n best_idx = numpy.argmin(temp_distance)\n return best_idx", "def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists", "def _compute_euclidean_neigh_matrix(src, d_matrix, 
radius):\n\n n_max = 100\n n_min = 3\n reached_points = np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")", "def saddle_points(matrix):\n if not all(len(row) == len(matrix[0]) for row in matrix[1:]):\n raise ValueError('Provided matrix is irregular.')\n columns = [col for col in zip(*matrix)]\n points = set()\n for ridx, row in enumerate(matrix):\n for cidx, element in enumerate(row):\n if element == max(row) and element == min(columns[cidx]):\n points.add((ridx, cidx))\n return points", "def _first_index_with_smaller_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] <= P[i]:\n i -= 1\n return i", "def min_distance_vertex(distance, visited):\n vertices = len(visited)\n min_distance = INF\n min_index = None\n for v in range(vertices):\n if not visited[v] and distance[v] <= min_distance:\n min_distance = distance[v]\n min_index = v\n return min_index", "def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def shortestDistance(self, grid):\n # return self.house_oriented_TLE(grid)\n # One axis\n row_count = [sum(row) for row in grid]\n col_count = [0]* len(grid[0])\n row_dist = [0]* len(grid)\n col_dist = [0]* len(grid[0])\n output = sys.maxsize\n for i in range(len(grid)): \n for j in range(len(grid[0])):\n col_count[j] += grid[i][j]\n \n for index_p in range(len(row_count)):\n for index_h in range(len(row_count)):\n row_dist[index_p] += abs(index_h - index_p) * row_count[index_h]\n 
\n for index_p in range(len(col_count)):\n for index_h in range(len(col_count)):\n col_dist[index_p] += abs(index_h - index_p) * col_count[index_h]\n \n # print(row_count)\n # print(col_count)\n # print(row_dist)\n # print(col_dist)\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n continue\n output = min(output, row_dist[i] + col_dist[j])\n return output", "def find_min_point(points):\r\n smallest_point_index = 0\r\n for i in range(1, len(points)):\r\n if points[i][1] < points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n elif points[i][0] > points[smallest_point_index][0] and points[i][1] == points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n return smallest_point_index", "def dist(dm, sm, neighbors):\n\n # Initialize list of possible distances\n distances = []\n\n # loop over all neighbors of the cell\n for neighbor in neighbors:\n # If the neighbor is valid\n if dm[neighbor[0], neighbor[1]] != -1:\n # add neighbor distance + 1 to possible distances\n distances.append(dm[neighbor[0], neighbor[1]] + 1)\n\n # return minimal distance\n return np.min(distances)", "def get_min_dist(x0, y0, arr):\n dist = np.hypot(arr.T[0] - x0, arr.T[1] - y0)\n min_dist = np.min(dist)\n val = np.argmin(dist)\n return min_dist, arr[val]", "def nodes_min_energy_index(self, node):\n idx = -1\n curr_energy = np.inf\n for i in range(self.cost_matrix.shape[1]):\n new_energy = self.cost_matrix[node][i]\n if new_energy < curr_energy:\n curr_energy = new_energy\n idx = i\n return idx", "def get_maxmin_index_from_row(\n distance_matrix: np.ndarray,\n row: int,\n previous_indexes: List,\n type: str,\n )-> int:\n distance_matrix = distance_matrix.copy()\n arr = distance_matrix[row].astype(float)\n \n aux_list = range(arr.shape[0])\n aux_list_2 = []\n for i in aux_list:\n if i in previous_indexes:\n aux_list_2.append(True)\n else:\n aux_list_2.append(False)\n previous_indexes_bool = aux_list_2\n \n if type == 'max':\n arr[previous_indexes_bool] = -1\n target_index = np.argmax(arr)\n if type == 'min':\n arr[previous_indexes_bool] = np.Inf\n target_index = np.argmin(arr)\n \n return target_index", "def get_idx(lons, lats, lon, lat):\n dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5\n return np.unravel_index(dist.argmin(), dist.shape)", "def pair_idx(rows, comm=None):\n raise Exception(\"Not implemented\")\n \n if comm == None:\n comm = MPI.COMM_WORLD\n \n total = comb(rows,2,exact=True)\n size = comm.Get_size()\n \n size = 1000\n \n print(total / size)\n \n target = total / size\n \n current_row = 0\n calc_list = []\n row_list = [[] for x in range(size)]\n for rank in range(size):\n row_list[rank].append(current_row)\n \n current_calcs = 0\n \n for value in range(current_row, rows):\n current_calcs += value\n if current_calcs > target:\n if rank == size-1:\n pass\n else:\n break\n \n calc_list.append(current_calcs)\n row_list[rank].append(value)\n current_row = value\n \n return row_list,calc_list", "def _findMin(p, A):\n\n m=(-1, (0,0))\n for p0 in A:\n dist = np.linalg.norm(p0-np.array(p))\n if m[0]==-1 or m[0]>dist:\n m = (dist, p0)\n \n return tuple(m[1])", "def distance_matrix(n_row, n_col):\n\n n_pop = int(n_row * n_col)\n center = int(n_row/2*(n_col+1))\n\n pop_idx = np.arange(n_pop)\n pop_idx_col = np.remainder(pop_idx, n_col)\n pop_idx_row = pop_idx // n_row\n\n pos = np.vstack((pop_idx_col,pop_idx_row)).T\n distance = spa.distance.cdist([pos[center]], pos)[0]\n\n return distance", "def min(weightData , dataSetVector ):\r\n # weightData: pass the 
whole weightData array.\r\n # dataSetVector: pass the a data vector to compare with weightdata array, to find its closest match\r\n winnerIndex = 0 #flag for initalizing the winner index\r\n minValue = EcuDist(dataSetVector,weightData[0]) # initalize the minValue\r\n # iterate through all weighdata rows to find the closest match, depending on ecu. distance,\r\n #and then return the index of the closest match(winner)\r\n for i in range(weightData.shape[0]):\r\n if(EcuDist(dataSetVector,weightData[i]) < minValue):\r\n minValue = EcuDist(dataSetVector,weightData[i])\r\n winnerIndex = i\r\n return winnerIndex", "def find_min(self, A, w):\n import numpy as np\n\n vcost = self.INFINITY\n vto = vfrom = -1\n for v in w:\n # Get array offset of minimum of this vertex\n i = np.argmin(A[v,:])\n if A[v,i] < vcost:\n vcost = A[v,i]\n vto = i\n vfrom = v\n return (vfrom, vto, vcost)", "def get_nearest_atom_inds_per_mol(self):\n self.closest_at_per_mol = np.zeros((self.nmol,\n self.at_per_mol,\n self.at_per_mol-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.at_per_mol)\n for imol in range(self.nmol):\n for iat in range(self.at_per_mol):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist_per_mol[imol, iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_at_per_mol[imol, iat] = at_inds", "def find_nearest_neighbours_SURF(averageDistance,inst,distanceArray,maxInst): \r\n NN=[]\r\n min_indices=[] \r\n\r\n for j in range(maxInst):\r\n if inst != j:\r\n locator = [inst,j]\r\n locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy)\r\n d = distanceArray[locator[0]][locator[1]]\r\n if d<averageDistance:\r\n min_indices.append(j)\r\n \r\n for j in range(len(min_indices)):\r\n NN.append(min_indices[j])\r\n \r\n return NN", "def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]", "def update_distmatrix(min_idx, dist_matrix):\n i, j = min_idx\n new_cluster_dist = np.minimum(dist_matrix[i, :], dist_matrix[j, :])\n new_cluster_dist[i] = np.inf\n \n dist_matrix[i, :] = new_cluster_dist\n dist_matrix[:, i] = new_cluster_dist\n \n dist_matrix[j, :] = np.inf\n dist_matrix[:, j] = np.inf\n \n return dist_matrix", "def get_nearest_node_index(node_list, random_node):\n\n dist_list = [\n (node.x - random_node.x) ** 2 + (node.y - random_node.y) ** 2\n for node in node_list\n ]\n minind = dist_list.index(min(dist_list))\n\n return minind", "def getNeighbors(training_data, test_row, k):\n\n distances = list()\n for training_row in training_data:\n dist = euclidianDistance(training_row, test_row)\n distances.append([training_row, dist])\n \n #Sort on the basis of dist\n distances.sort(key=lambda row:row[1])\n\n neighbors = list()\n\n for i in range(int(k)):\n neighbors.append(distances[i][0])\n\n return neighbors", "def eeg_findnearest(x,X):\t\n\t#x array or vector and X a scalar\n\tabsdif = np.abs(x-X)\n\tval = np.min(absdif)\n\tidx = absdif.argmin()\n\treturn val,idx", "def get_nearest_atom_inds(self):\n # Create empty data structure\n self.closest_ats = np.zeros((self.natom, self.natom-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.natom)\n for iat in range(self.natom):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist[iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_ats[iat] = at_inds", "def 
closestCluster(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n\treturn bestIndex", "def get_min_shannon_entropy(grid):\r\n curr_min = math.inf\r\n curr_best = []\r\n for i in range(len(grid[0])):\r\n for j in range(len(grid)):\r\n if not grid[j][i].collapsed:\r\n w = grid[j][i].block_weights\r\n shannon_entropy = sum([-math.log(el) for el in w] )\r\n if shannon_entropy < curr_min:\r\n curr_min = shannon_entropy\r\n curr_best = [(i,j)]\r\n elif shannon_entropy == curr_min:\r\n curr_best.append((i,j))\r\n idx = np.random.choice(range(len(curr_best))) #choose randomly if theres a tie\r\n return curr_best[idx] #x,y\r", "def findSmallest(distancesWithNames):\n smallest = distancesWithNames[0][2]\n smallestIndex = -1\n for i in range(len(distancesWithNames)):\n if smallest >= distancesWithNames[i][2]:\n smallest = distancesWithNames[i][2]\n smallestIndex = i\n return smallestIndex", "def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx", "def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass", "def find_min_path(s, t, dist):\n\n rows = len(dist) - 1\n cols = len(dist[0]) - 1\n col = cols\n row = rows\n pos_str = \"Position: (row={} col={}) -> (row={} col={})\"\n cst_str = \"Cost: {}\"\n prev_row = row\n prev_col = col\n\n # init sparse path matrix\n sparse_path = [[\" \" for x in range(cols + 1)] for x in range(rows + 1)]\n sparse_path[0][0] = \"0\"\n\n # start with operation at (rows, cols) and work backwards\n sparse_path[rows][cols] = dist[rows][cols]\n\n if verbose == 2:\n print()\n print(\"Initial Minimum Path Matrix:\")\n print_matrix(s, t, sparse_path)\n\n while True:\n\n # bail out if we are in the corner\n if row == 0 and col == 0:\n break\n\n # if we are not at a matrix boundary\n if row != 0 and col != 0: # if at left edge or top row, cannot move diagonally\n\n # diagonal\n if (dist[row - 1][col - 1] == min(dist[row - 1][col],\n dist[row][col - 1],\n dist[row - 1][col - 1])) and (dist[row - 1][col - 1] == dist[row][col] or dist[row - 1][col - 1] == dist[row][col] - 1):\n sparse_path[row - 1][col - 1] = dist[row - 1][col - 1]\n temp_cost = dist[row - 1][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # left\n elif dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # above\n else:\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # if at matrix edge, 
can only move up\n elif col == 0:\n # above\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # must be at row boundary, can only move left\n else:\n # left\n if dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # print matrix\n if verbose == 2:\n print_matrix(s, t, sparse_path)\n\n return sparse_path", "def closestClusterAndDistance(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n return (bestIndex, closest)", "def get_nearest_indices(vector, matrix, num=5):\n if len(matrix) < num:\n num = len(matrix)\n\n m, n = matrix.shape\n diff_matrix = np.tile(vector, (m, 1)) - matrix\n diff_matrix = abs(diff_matrix)\n\n distance = diff_matrix.sum(axis=1)\n sortIndices = np.argsort(distance)\n sortIndices = sortIndices[0:num]\n\n return sortIndices[random.randint(0, num - 1)]", "def detec_mfo_dist(betas):\n k = len(betas)\n min_dist = np.inf\n for i in range(k-1):\n for j in range(i+1,k):\n distance = np.sum((betas[i,:] - betas[j,:])**2)\n if distance < min_dist:\n MFO_index = [i,j]\n min_dist = distance\n return MFO_index", "def get_nearest_row(self):\n return (self.rect.top - (self.screen.get_height() // 12)) // self.maze.block_size", "def smallest_cert(pts):\n # 1. project the points\n def projected_flattened_pt(pt):\n mag = distance(pt)\n distance_wanted = rect_dist\n return distance_wanted/mag*pt[1], \\\n distance_wanted/mag*pt[2]\n\n flat_pts = np.array([[projected_flattened_pt(pt), pt] for pt in pts])\n\n # 2. identify rows of data\n data = {} \n # maps height (from ground) to the \"row\" (scan) list of\n # horizontal-only (1D) data\n for pt, origpt in flat_pts:\n for row_height in data:\n if abs(pt[1]-row_height) < dist_bt_rows/2:\n data[row_height].append([pt[0], origpt])\n break\n else:\n data[pt[1]] = [[pt[0], origpt]]\n\n #3. 
build up final_data so each row has enough point x-density\n def get_left_i(pts, left_edge):\n # get last index <= left_edge\n for i, pt in enumerate(pts):\n if pt[0] > left_edge:\n return i - 1\n\n def get_right_i(pts, right_edge):\n # get first index >= right_edge\n i = len(pts)-1\n while True:\n pt = pts[i]\n if pt[0] < right_edge:\n return i + 1\n i -= 1\n\n final_data = []\n for _, subdata in data.items():\n subdata = sorted(subdata, key=lambda t: t[0])\n i = get_left_i(subdata, lane_left)\n end = get_right_i(subdata, lane_right)\n\n final_data = [subdata[i][1]]\n while i < end:\n next_i = get_left_i(subdata[i:], \n subdata[i][0]+max_xpt_separation)\n if not next_i:\n return False\n i += next_i\n final_data.append(subdata[i][1])\n\n return final_data", "def _lowestDistanceToCluster(self, clusters: ndarray, sequenceIdx: int) -> Tuple[int, float]:\n lowestClusterIdx = -1\n lowestDistance = np.inf\n for cIdx in range(self.numClusters):\n distance = self._distanceToCluster(clusters[cIdx], sequenceIdx)\n if distance < lowestDistance:\n lowestClusterIdx = cIdx\n lowestDistance = distance\n return lowestClusterIdx, lowestDistance", "def find_closest_atom(coords1, coords2):\n\n coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def closestCentroids(self, points , centroids ):\n dists = scipy.spatial.distance.cdist(points,centroids)\n # 1 is dimension\n minIds = numpy.argmin(dists, 1)\n return minIds", "def find_neighbor_indices(atoms, probe, k):\r\n coords_all = atoms.getCoords()\r\n neighbor_indices = []\r\n atom_k = atoms[k]\r\n radius = atom_k.getRadius() + probe + probe\r\n\r\n indices = range(k)\r\n indices.extend(range(k+1, len(atoms)))\r\n\r\n for i in indices:\r\n\tdist = pos_distance(coords_all[k], coords_all[i])\r\n\t#dist = np.linalg.norm(coords_all[k] - coords_all[i])\r\n\tif dist < radius + atoms[i].getRadius():\r\n neighbor_indices.append(i)\r\n\r\n return neighbor_indices", "def exhaustive_search(X, z):\n #initialize shortest path and distance to the first row\n d_star = la.norm(z - X[0])\n x_star = X[0]\n #check the distance from each row of X and if shortest thus far, save it\n for i in range(X.shape[0]):\n #calculate distance from current row to target z\n x = X[i,:]\n cur_dist = la.norm(x - z)\n #if the distance is smallest thus far, save the row and distance\n if cur_dist < d_star:\n x_star = x\n d_star = cur_dist\n return x_star, d_star", "def _retrieve_neighbors(df, i_point, point, eps, column):\n neigborhood = []\n for index, row in df.iterrows():\n if index != i_point:\n a = np.array(point[column])\n b = np.array([row[column]])\n distance = np.linalg.norm(a - b)\n if distance <= eps:\n neigborhood.append(index)\n\n return neigborhood", "def nearest_neighbor(data):\n features = set([i for i, x in enumerate(data[0][1])])\n return leave_one_out_cross_validation(data, features)", "def closest_node(node, nodes):\n nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum(\"ij,ij->i\", deltas, deltas)\n return np.argmin(dist_2), np.min(dist_2)", "def find_nearest_neighbors(p, points, k):\n\timport numpy as np\n\tdistances = np.zeros(points.shape[0])\n\tfor i in range(len(distances)):\n\t\tdistances[i] = distance(p,points[i])\n\tind = np.argsort(distances)\n\treturn ind[0:k]", "def djikstre(connection_mat):\n n = connection_mat.shape[0]\n dist, prev = {}, {}\n Q = list(range(n))\n \n for i in Q:\n dist[i] = 
np.inf\n dist[n-2] = 0.0\n \n while(len(Q)>0):\n\n min_dist = min([dist[key] for key in Q])\n u = [key for key in Q if dist[key] == min_dist][0]\n Q.remove(u)\n\n for v in np.nonzero(connection_mat[:, u])[0]:\n \n alt = dist[u]+connection_mat[v, u]\n \n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n \n return dist, prev", "def neirest_neighbour(business, cells):\n array = cells.get_neighbours(business, num=1)\n neighbours = pd.DataFrame(array).set_index('index')\n index = neighbours['distance'].idxmin()\n return neighbours.loc[index]", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def _nearest_point_index(points, point):\n distance = sys.float_info.max\n index = None\n for i, p in enumerate(points):\n temp = _vec_distance(p, point)\n if temp < distance:\n distance = temp\n index = i\n return index, distance", "def _kth_nearest_neighbor_dist(\n distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix], k\n) -> np.ndarray:\n\n if not is_integer(k):\n raise ValueError(f\"parameter 'k={k}' must be a positive integer\")\n else:\n # make sure we deal with Python built-in\n k = int(k)\n\n if not (0 <= k <= distance_matrix.shape[1]):\n raise ValueError(\n \"'k' must be an integer between 1 and \"\n f\"distance_matrix.shape[1]={distance_matrix.shape[1]}\"\n )\n\n if isinstance(distance_matrix, np.ndarray):\n dist_knn = np.partition(distance_matrix, k - 1, axis=1)[:, k - 1]\n elif isinstance(distance_matrix, scipy.sparse.csr_matrix):\n # see mircobenchmark_kth_nn.py for a comparison of implementations for the\n # sparse case\n\n def _get_kth_largest_elements_sparse(\n data: np.ndarray,\n indptr: np.ndarray,\n row_nnz,\n k_neighbor: int,\n ):\n dist_knn = np.zeros(len(row_nnz))\n for i in range(len(row_nnz)):\n start_row = indptr[i]\n dist_knn[i] = np.partition(\n data[start_row : start_row + row_nnz[i]], k_neighbor - 1\n )[k_neighbor - 1]\n\n return dist_knn\n\n row_nnz = distance_matrix.getnnz(axis=1)\n\n if (row_nnz < k).any():\n raise ValueError(\n f\"There are {(row_nnz < k).sum()} points that \"\n f\"do not have at least k_neighbor={k}.\"\n )\n\n dist_knn = _get_kth_largest_elements_sparse(\n distance_matrix.data,\n distance_matrix.indptr,\n row_nnz,\n k,\n )\n else:\n raise TypeError(f\"type {type(distance_matrix)} not supported\")\n\n return dist_knn", "def _first_index_with_bigger_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] >= P[i]:\n i -= 1\n return i", "def getNearestLineIndex(row, tagLineNumbers):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # initialize local auxiliary variables {{{\n nearestLineNumber = -1\n nearestLineIndex = -1\n # }}}\n\n # go through all tag line numbers and find the one nearest to the specified row {{{\n for lineIndex, lineNumber in enumerate(tagLineNumbers):\n # if the current line is nearer the current cursor position, take it {{{\n if (nearestLineNumber < lineNumber <= row):\n nearestLineNumber = lineNumber\n nearestLineIndex = lineIndex\n # }}}\n\n # if we've got past the current cursor position, let's end the search {{{\n if (lineNumber >= row):\n break\n # }}}\n # }}}\n\n # return index of the line with the nearest tag\n return nearestLineIndex\n # }}}", "def shortestpathij(self, i, j):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = 
[]\n \n for i in range(len(pathlist)):\n Temp = 0\n for j in range(len(pathlist[i]) - 1):\n Temp += self.Dismatrix[pathlist[i][j], pathlist[i][j+1]]\n distance.append(Temp)\n \n if(len(distance) == 0):\n return None\n else:\n return min(distance)", "def min_horizontal_dist_meters(coords, targets, is_geo=False):\n xe = coords[:, 0]\n ye = coords[:, 1]\n n = len(xe)\n d = np.zeros(n)\n for j in range(n):\n d1 = dist_in_meters(targets, [xe[j], ye[j]], is_geo=is_geo)\n d[j] = d1.min()\n return d", "def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]", "def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def _low_tri_indices(rowCount):\n for col in range(rowCount):\n for row in range(col, rowCount):\n yield (row, col)", "def min_distance(s1, s2):\n n = len(s1)\n m = len(s2)\n matrix = [([0]*(m+1)) for i in xrange(n+1)]\n for i in xrange(m+1):\n matrix[0][i] = i\n for i in xrange(n+1):\n matrix[i][0] = i\n for i in xrange(1,n+1):\n for j in xrange(1,m+1):\n temp = min(matrix[i-1][j]+1, matrix[i][j-1]+1)\n d = 0 if s1[i-1]==s2[j-1] else 1\n matrix[i][j] = min(temp, matrix[i-1][j-1]+d)\n return matrix[n][m]", "def extract_min(H, ds):\n minDist = approxInf\n u = None # min vertex unknown\n i = 0\n for v in H:\n if ds[v] <= minDist:\n minDist = ds[v]\n u = v # note that u is unused (instead returned by pop)\n imin = i\n i += 1\n return(H.pop(imin)) # return [u, d]", "def findminpath(tab, gxtab, gytab, pixtab):\n\n pathdist = 2 # the number of points each points on a ray can related to on the previous ray\n pathdist_penalty = 0.3 # penalty of the difference of the pathdist\n pathpix_penalty = 2 # penalty of the difference of pixel values between the point and the previous point\n nray = tab.shape[1]\n\n #tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))\n #pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))\n #gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))\n #gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))\n\n tab = np.hstack((tab,tab,tab)) # horizontally stack the tab matrix to prepare for the filtering on the result\n pixtab = np.hstack((pixtab,pixtab,pixtab))\n gxtab = np.hstack((gxtab,gxtab,gxtab))\n gytab = np.hstack((gytab,gytab,gytab))\n\n tab = (tab - tab.min()) / (tab.max() - tab.min()) # noralize the tab matrix\n pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # for we want to find the white contour of the cell so we multipy -1 on the pixtab\n # tab = tab / np.median(tab)\n # pixtab = pixtab / np.median(pixtab)\n path = np.zeros(tab.shape)\n path[:, 0] = np.array(range(0, tab.shape[0]))\n score = np.zeros(tab.shape)\n score[:, 1] = tab[:, 1]\n\n for i in range(1, tab.shape[1]):\n for j in range(tab.shape[0]):\n mins = np.Inf # record the min value of the ray\n minat = 0\n for k in range(-pathdist, pathdist+1):\n if(0 <= (j+k) and (j+k) < tab.shape[0]):\n s = pixtab[j, i]\n pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])\n s += pixdiff * pathpix_penalty # two kinds of penalty\n s += abs(k) * pathdist_penalty\n s += score[j+k, i-1]\n\n if(s < mins):\n mins = s\n minat = j + k\n path[j, i] = minat\n score[j, i]= mins\n\n start = int(np.argmin(score[:, -1]))\n path = path.astype(np.int32)\n minpath = 
[start]\n for i in range(tab.shape[1]-1, 0, -1):\n minpath.append(path[minpath[-1], i])\n minpath = minpath[::-1]\n # print(len(minpath))\n minpath = savgol_filter(minpath, 15, 3) # apply a Savitzky-Golay filter to the raw minpath signal\n minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray\n return np.array(minpath).astype(np.int32)", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def index_condensed_matrix(n, i, j):\n if i == j:\n main_warning(\"Diagonal elements (i=j) are not stored in condensed matrices.\")\n return None\n elif i > j:\n i, j = j, i\n return int(i * (n - (i + 3) * 0.5) + j - 1)", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj", "def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices", "def locate_source(p,d):\n # M = sensors, n = dimensions\n M, n = p.shape\n p = np.matrix( p ).T\n\n # pick closest receiver\n c = np.argmin(d)\n #sensors delta time relative to sensor c\n d = d - min(d)\n\n indices = list(range(M))\n del indices[c]\n\n A = np.zeros([M-2,n])\n b = np.zeros([M-2,1])\n\n i = indices[0]\n for row,j in enumerate(indices[1:]):\n A[row,:] = 2*( (d[j])*(p[:,i]-p[:,c]).T - \\\n (d[i])*(p[:,j]-p[:,c]).T )\n b[row,0] = (d[i])*((d[j])**2-p[:,j].T*p[:,j]) + \\\n ((d[i])-(d[j]))*p[:,c].T*p[:,c] + \\\n (d[j])*(p[:,i].T*p[:,i]-(d[i])**2)\n\n\n x = np.asarray( np.linalg.lstsq(A,b)[0] )[:,0]\n return x", "def _intra_cluster_distance(distances_row, labels, i):\n mask = labels == labels[i]\n mask[i] = False\n if not np.any(mask):\n # cluster of size 1\n return 0\n a = np.mean(distances_row[mask])\n return a", "def matrix_idx(n_hist, n_req, n_rows):\n\n flat_idx = []\n for i in range(n_rows):\n flat_idx.extend(range(i * n_req, (i + 1) * n_req + n_hist))\n # idx = np.unravel_index(flat_idx, (n_rows, n_hist + n_req))\n\n idx_matrix = np.reshape(flat_idx, (n_rows, n_hist + n_req))\n idxX = idx_matrix[:, n_req:]\n idxY = idx_matrix[:, :n_req]\n\n return idxX, idxY", "def minesweeper(matrix):\n \n num_rows = len(matrix)\n num_cols = len(matrix[0])\n \n adj_mines = []\n \n adj_row = [0]*num_cols\n \n for i in range(num_rows):\n adj_mines.append(adj_row[:])\n \n for r in range(num_rows):\n for c in range(num_cols):\n if matrix[r][c] == True:\n if (r-1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r-1][c-1] += 1\n if (r-1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r-1][c] += 1\n if (r-1) in range(num_rows) and (c+1) in range(num_cols): \n 
adj_mines[r-1][c+1] += 1\n if (r) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r][c-1] += 1\n if (r) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r][c+1] += 1\n if (r+1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r+1][c-1] += 1\n if (r+1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r+1][c] += 1\n if (r+1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r+1][c+1] += 1\n\n \n return adj_mines", "def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)", "def __find_immediate_predecessors(storm_object_table, target_row):\n\n error_checking.assert_is_integer(target_row)\n error_checking.assert_is_geq(target_row, 0)\n error_checking.assert_is_less_than(\n target_row, len(storm_object_table.index)\n )\n\n predecessor_sec_id_strings = [\n storm_object_table[c].values[target_row]\n for c in PREV_SECONDARY_ID_COLUMNS\n if storm_object_table[c].values[target_row] != ''\n ]\n\n num_predecessors = len(predecessor_sec_id_strings)\n if num_predecessors == 0:\n return numpy.array([], dtype=int)\n\n target_time_unix_sec = storm_object_table[\n tracking_utils.VALID_TIME_COLUMN].values[target_row]\n\n predecessor_rows = numpy.full(num_predecessors, -1, dtype=int)\n\n for i in range(num_predecessors):\n these_rows = numpy.where(numpy.logical_and(\n storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values ==\n predecessor_sec_id_strings[i],\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values <\n target_time_unix_sec\n ))[0]\n\n if len(these_rows) == 0:\n continue\n\n this_subrow = numpy.argmax(\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[\n these_rows]\n )\n\n predecessor_rows[i] = these_rows[this_subrow]\n\n return predecessor_rows[predecessor_rows >= 0]", "def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index", "def medoid_ft(ftvectors,pairwise_dist_matrix):\r\n N = len(ftvectors)\r\n if N == 1 : \r\n return 0 \r\n # pairwise_dist_matrix = calculate_dist_matrix_ft(ftvectors) \r\n sum_dist = np.sum(pairwise_dist_matrix, axis = 0)\r\n min_idx = np.argmin(sum_dist)\r\n return min_idx", "def _partition_nearest(self, medoids, dists, only_these=set()):\n if len(only_these) == 0:\n allowed_inds = self._not_ignored_inds\n else:\n allowed_inds = self._not_ignored_inds & only_these\n closest_medoid_ind = np.argmin(dists[:,medoids], 1) # If len(medoids)==3, would look like 
[2,1,1,0,1,2,...].\n clusts = [[] for i in medoids]\n for node_ind, med_ind in enumerate(closest_medoid_ind):\n if node_ind in allowed_inds:\n clusts[med_ind].append(node_ind)\n return clusts", "def search_matrix(to_search: int, a_matrix: list) -> (int, int):\n row = 0\n column = len(a_matrix[0]) - 1\n while row < len(a_matrix) and column >= 0:\n if to_search < a_matrix[row][column]:\n column -= 1\n elif to_search > a_matrix[row][column]:\n row += 1\n else:\n return row, column\n return -1, -1", "def closest(self, x, y):\n pts = np.column_stack([self.x, self.y])\n # Transform data coordinates to pixel coordinates.\n pts = self.ax.transData.transform(pts)\n diff = pts - [x, y]\n dist = np.hypot(*diff.T)\n min_index = np.argmin(dist)\n return min_index, dist[min_index]", "def closest_centroid(x,centroids):\n\tK =len(centroids)\n\tN = len(x)\n\tDistance = np.zeros((N,K))\n\tfor j in range(K):\n\t\tmu = centroids[j]\n\t\tDistance[:,j] = np.linalg.norm(x-mu,axis=1)\n\tout = np.argmin(Distance,axis=1) \n\treturn out" ]
[ "0.76830524", "0.7358047", "0.715276", "0.6513678", "0.64565593", "0.645209", "0.64473367", "0.64161915", "0.6344284", "0.6331053", "0.629967", "0.6284017", "0.62775195", "0.6246845", "0.6215876", "0.6197371", "0.61904216", "0.614493", "0.6127443", "0.61131895", "0.6061455", "0.6061441", "0.6057897", "0.6010826", "0.59732074", "0.59727025", "0.59643596", "0.5957675", "0.5939356", "0.5919417", "0.5908226", "0.5907221", "0.5905387", "0.5903414", "0.5898643", "0.5890703", "0.58218634", "0.58150864", "0.5802884", "0.57944125", "0.5784227", "0.5776671", "0.57577515", "0.57474875", "0.57286876", "0.5727042", "0.5725983", "0.57213503", "0.57150173", "0.5697625", "0.5687498", "0.5668719", "0.56657505", "0.5657357", "0.5656998", "0.5654915", "0.56396127", "0.56382775", "0.5636967", "0.5634042", "0.5632858", "0.56289357", "0.56117386", "0.5610537", "0.5605596", "0.56049234", "0.5604687", "0.5603997", "0.55920154", "0.5590341", "0.5588727", "0.55827326", "0.5581248", "0.5567834", "0.55632454", "0.55569965", "0.5545793", "0.5543416", "0.55275446", "0.55223936", "0.5515401", "0.5513707", "0.5511564", "0.5510364", "0.5501646", "0.5498301", "0.54808396", "0.54785067", "0.54752326", "0.5470072", "0.5469641", "0.546202", "0.545913", "0.5458002", "0.54521465", "0.544734", "0.54414034", "0.5437693", "0.5436167", "0.54293483" ]
0.6456699
4
Convert the degree vector to degree function.
def degree_vector_to_matrix(vector):
    return np.diag(vector)
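A quick hypothetical check (not part of the record; the example vector is an assumption): for the degree vector [2, 3, 1] the helper returns the 3x3 diagonal degree matrix, the D used in a graph Laplacian D - W.

import numpy as np

deg = np.array([2, 3, 1])
D = degree_vector_to_matrix(deg)
# D -> [[2 0 0]
#       [0 3 0]
#       [0 0 1]]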
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vecToFunc(vector):\n def f(x):\n f = 0\n for i in range(len(vector)):\n f += vector[i]*x**i\n return f\n return f", "def degree_v(self):\n return self._degree_v", "def degree(x):\n return x*(180.0/math.pi)", "def laplacian(degree_vector, weight_matrix):\n return np.diag(degree_vector) - weight_matrix", "def angleToVector(teta):\r\n x=1 #we fix x and we will determine z in consquence\r\n #we want z/x=tan(teta) so:\r\n z=np.tan(teta)*x\r\n return((x,z))", "def rad2deg(x):\r\n # see decorator for function body\r", "def in_degrees_tf_vector(self):\n return tf.reshape(self.in_degrees_tf, [self.n])", "def degrees(x):\n return 0.0", "def vector_to_int(a_vec: np.ndarray, characteristic: int, degree: int) -> int:\n a = 0\n factor = 1\n for i in range(degree - 1, -1, -1):\n a += a_vec[i] * factor\n factor *= characteristic\n\n return a", "def rad2deg(a):", "def out_degrees_tf_vector(self):\n return tf.reshape(self.out_degrees_tf, [self.n])", "def rotorconversion(x):\n return cf.MultiVector(layout, val_rotorconversion(x))", "def degree(g, x, norm, deg, rates):\n if type(x) == list: \n x = az.transform(x)\n if norm == 1:\n if network_matrix(x, rates).sum() == 0:\n return 1./x.count(\"1\")\n if deg == \"in\":\n return network_matrix(x, rates).sum(1)[az.transform(x).index(g)]/network_matrix(x, rates).sum()\n else:\n return network_matrix(x, rates).sum(0)[az.transform(x).index(g)]/network_matrix(x, rates).sum()\n else:\n if deg == \"in\":\n return network_matrix(x, rates).sum(1)[az.transform(x).index(g)]\n else:\n return network_matrix(x, rates).sum(0)[az.transform(x).index(g)]", "def V2lambda(V):\n return(3956/V)", "def v_o(A,vd):\n return A*vd", "def angle_between_vectors_degrees(u, v):\n a = np.dot(u, v)\n b = np.linalg.norm(u)\n c = np.linalg.norm(v)\n d = a / (b* c)\n if d > 1:\n d = 1\n if d < -1:\n d = -1\n e = acos(d)\n f = np.degrees(e)\n return f", "def dV(X):\n return -4 * a * np.power(X, 3) + 2 * b * X", "def cos2degrees(x):\n return math.degrees(math.acos(x))", "def derFunc(self,x_vec):\n x=x_vec[0]\n y=x_vec[1]\n z=np.array([2*(self.x_0-x),2*(self.y_0-y)])\n return z*self.sfunc(x,y)", "def val_rotorconversion(x):\n B_val = val_vec_repr_to_bivector(x)\n R_val = val_exp(B_val)\n return R_val", "def degree(self, u, outgoing=False):\n return NotImplementedError()", "def deg2rad(a):", "def _orthogonal_vector(vector):\n return -1 * vector[1], vector[0]", "def degc_to_degf(input_temp):\r\n return (input_temp * 1.8) + 32", "def vector_angle(v):\n assert len(v) == 2\n x, y = v\n return np.arctan2(y, x)", "def StepsToDegree(self,Steps):\n return int(round(Steps/(self.Reduction/self.StepAngle/2)))", "def trans(x):\r\n return np.arccos(1-2*x)", "def deg2rad(x):\r\n # see decorator for function body\r", "def angles_vectors_degrees(u, v):\n a = angle_smallest_vectors_degrees(u, v)\n return a, 360. 
- a", "def construct_angle_degrees(loader, node):\n value = loader.construct_scalar(node)\n exprvalue = value\n if exprvalue.startswith(\"deg(\"):\n exprvalue = exprvalue.strip()[4:-1]\n try:\n return float(exprvalue) * math.pi / 180.0\n except ValueError:\n raise RosParamException(\"invalid degree value: %s\"%value)", "def vec_angle_deg(v1,v2):\r\n \r\n return math.degrees(vec_angle_rad(v1,v2))", "def to_celsius(degrees):\n return (degrees - 32) * (5/9)", "def _asind(v):\n return math.degrees(math.asin(v))", "def poly(x, degree=2):\n x = np.array(x)\n X_trans = np.transpose(np.vstack((x**k for k in range(degree + 1))))\n return np.linalg.qr(X_trans)[0][:, 1:]", "def f(x):\n return np.tan(x) - np.sin(x) - (m*g)/(2*k*L)", "def z_to_vector(vector):\n norm = np.linalg.norm(vector)\n if norm == 0:\n return np.identity(3)\n v = np.array(vector) / norm\n phi = np.arccos(v[2])\n if any(v[:2]):\n #projection of vector to unit circle\n axis_proj = v[:2] / np.linalg.norm(v[:2])\n theta = np.arccos(axis_proj[0])\n if axis_proj[1] < 0:\n theta = -theta\n else:\n theta = 0\n phi_down = np.array([\n [np.cos(phi), 0, np.sin(phi)],\n [0, 1, 0],\n [-np.sin(phi), 0, np.cos(phi)]\n ])\n return np.dot(rotation_about_z(theta), phi_down)", "def vec2angle(vec):\n return round(atan2(vec[1], vec[0]), 3)", "def degree_on_basis(self, b):\n return sum(b)", "def angle2fD(angle,units='degrees'):\n if units=='degrees':\n angle*=(math.pi/180.0)\n X = 4.0*math.tan(angle/4.0)\n fD = 1.0/X\n return fD", "def deg2rad_inplace(a):", "def funcv(x):\n f0 = x[0] ** 3.0 + x[1] + 3.0\n f1 = x[1] - 4.0 * x[0]\n return f0, f1", "def zzx_degree(f):\n return len(f) - 1", "def degree_u(self):\n return self._degree_u", "def degree_to_radians(degree):\n return degree * pi / 180", "def atan2_vec(vector):\n return -np.arctan2(vector[1], vector[0])", "def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return phi.T", "def rad2deg_inplace(a):", "def dir_vect(theta):\n return np.array([np.cos(theta),np.sin(theta)])", "def get_angle(self, t_step, degree=True):\n v_vector = np.array([self.s_velocity[t_step], self.d_velocity[t_step]])\n u_v1 = self._get_unit_vector(v_vector)\n rad = np.arccos(np.clip(np.dot(self.ref_vector, u_v1), -1.0, 1.0))\n if degree:\n return np.rad2deg(rad) * np.sign(self.d_velocity[t_step])\n else:\n return rad", "def angle_smallest_vectors_degrees(u, v):\n return degrees(angle_smallest_vectors)", "def zx_rotation(vector,theta):\r\n R = np.array([[np.cos(theta),0,np.sin(theta)],\r\n [0,1,0],\r\n [-np.sin(theta),0,np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)", "def custom_np_function(vector):\n summation = 0\n for item in vector:\n summation += item ** 2\n return summation", "def angle_of_vector(vector):\n z = complex(*vector[:2])\n if z == 0:\n return 0\n return np.angle(complex(*vector[:2]))", "def __multidegree(f):\n\n mon = f.monomials()\n mon.sort() # Sorts them with the polynomial ring order\n return mon[-1].degrees() # The multidegree is the degree of the biggest monomial", "def value_to_angle(value):\n return ...", "def rot(vec, angle, degrees=True):\r\n if degrees:\r\n angle = np.radians(angle)\r\n r = np.array([[np.cos(-angle), -np.sin(-angle)],\r\n [np.sin(-angle), np.cos(-angle)]])\r\n return r.dot(vec)", "def degree(f, j=0):\n return dmp_degree_in(f.rep, j, f.lev)", "def diriv(x, params):\n return np.array([x,1])", "def degree_list(f):\n return dmp_degree_list(f.rep, f.lev)", "def degree(self):\n return sum(self)", "def 
out_degrees_tf_vertex(self, vertex: int) -> tf.Tensor:\n return tf.gather(self.out_degrees_tf, [vertex])", "def d(u, v):\r\n\tdiff = u-v\r\n\treturn diff.dot(diff)", "def __getPolynomial(self) -> 'func':\n return lambda x: sum(self.pts[i]*base(x)\n for i, base in enumerate(self.basis))", "def _acosd(v):\n return math.degrees(math.acos(v))", "def steps_to_angle():\n pass", "def proj_tan(self, v, x, c):\n return v", "def to_povray_func(self):\n\n func_str = \"{}\".format(self.mth)\n\n for mb in self.mbs:\n func_str += (\" - {} \"\n \"* exp(-(pow(x-{},2) + pow(y-{},2) + pow(z-{},2))\"\n \"/ (2 * pow({},2)))\"\n .format(mb.a, mb.x, mb.y, mb.z, mb.s))\n\n return func_str", "def normalize_in_degrees(num_nodes, dict):\n y = [val / num_nodes for key, val in dict.items()]\n \n return y", "def polynomial_degree(self):\n return polynomial_degree(self)", "def degree(self, k):\n one_if_my_all_k = 1 if self.my_alll(k) else 0\n return (2 * self.get_prp_j(k)[0]) + one_if_my_all_k", "def V_phi(self, x):\n\n x = self.featureExtractor.getFeatures(x)\n\n x = torch.tensor(x).float()\n\n x = F.relu(self.linear(x))\n\n v = self.linear_v(x)\n\n return v", "def activ_fn_derivative(z):\n return 1 - np.square(np.tanh(z))", "def tan(self, a):\n return math.tan(a)", "def to_linear(self):\n return inv(quad_hybrid).dot(self.circular)", "def f(x):\n return N.sqrt(N.power(N.cos(x),2)+1.0)", "def AngleDtoR(degree):\n\trad=degree*math.pi/180\n\treturn rad", "def acc(x: float, v: float, t: float) -> float:\n return -k*v - np.sin(x) + c*np.cos(omega*t)", "def convertToDiscreteFunction(boolean: bool) -> cern.japc.value.DiscreteFunction:\n ...", "def F(x):\n soln = x - (1.0/5.0)*math.cos(10.0*x+1.0) \n return soln", "def in_degrees_np(self) -> np.ndarray:\n return self.run_tf(self.in_degrees_tf)", "def angle(self, vec, unit='rad'):\n raise NotImplementedError('angle not implemented for VectorArrays')", "def Avv_func(f):\n\n def Avv(x, v):\n def F(s):\n return f(x + v * s)\n\n return jacfwd(jacfwd(F))(0.0)\n\n return Avv", "def dec(self):\n return gon2dec(self.gon_angle)", "def elevation_to_a_degree(numb1, numb2):\r\n return f\"Your result: {numb1**numb2}\"", "def transform(val):\n return (100*math.atan(val) + 50*math.pi) / math.pi", "def rotate(vector, angle):\n return np.cos(angle) * vector[0] + np.sin(angle) * vector[1], \\\n -np.sin(angle) * vector[0] + np.cos(angle) * vector[1]", "def angle_2D(v):\n len_v=(v[0]**2+v[1]**2)**(0.5)\n if len_v==0:\n return 0\n ret = math.acos(v[0]/len_v)\n if v[1]<0:\n ret=6.283185307179586-ret\n return ret", "def get_degrees(self):\n return np.arange(self.lmax + 1)", "def orthogonal(v):\n return np.array([-v[1], v[0]])", "def ret2dva(r_um):\n sign = np.sign(r_um)\n r_mm = 1e-3 * np.abs(r_um)\n r_deg = 3.556 * r_mm + 0.05993 * r_mm ** 2 - 0.007358 * r_mm ** 3\n r_deg += 3.027e-4 * r_mm ** 4\n return sign * r_deg", "def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()", "def ang_fac(ang):\n return np.exp(2 * 1j * ang)", "def _angle(u, v, w, d='+'):\n vu = np.arctan2(u[1] - v[1], u[0] - v[0])\n vw = np.arctan2(w[1] - v[1], w[0] - v[0])\n phi = vw - vu\n if phi < 0:\n phi += 2 * np.pi\n if d == '-':\n phi = 2 * np.pi - phi\n return np.round(phi, 6)", "def angle(z):", "def convertFahrenheitToCelsius(degrees):\n degrees = str(degrees)\n convert = ((decimal.Decimal(degrees) - 32) * 5) / decimal.Decimal('9')\n return float(convert)", "def zerodegree_pol(dim):\n\n out = zeros(dim)\n out[0] = 1\n\n return out", "def int_to_vector(a: int, characteristic: int, degree: int) -> 
np.ndarray:\n a_vec = np.zeros(degree, dtype=DTYPE)\n for i in range(degree - 1, -1, -1):\n q, r = divmod(a, characteristic)\n a_vec[i] = r\n a = q\n\n return a_vec", "def conv(x):\n return x#-2*(16.41*x + 65.04-95.12) ", "def total_degree(f):\n return dmp_total_degree(f.rep, f.lev)", "def map_from_eta_to_n_v(eta):\n eta_0, eta_1 = concatenated_vector_to_scalar_matrix(eta)\n n = 2 * eta_0\n v = np.linalg.inv(-2 * eta_1)\n return n, v" ]
[ "0.6630002", "0.63732994", "0.61321235", "0.608381", "0.6005241", "0.600304", "0.5996289", "0.5975939", "0.5958366", "0.5938791", "0.5896974", "0.5866529", "0.58305264", "0.58234054", "0.58143467", "0.5772291", "0.5767484", "0.57465386", "0.5737111", "0.5736712", "0.5721935", "0.5712332", "0.5710978", "0.56644505", "0.56579167", "0.5646956", "0.56460786", "0.5642318", "0.5640111", "0.56067693", "0.55688775", "0.5564824", "0.55592465", "0.5535152", "0.55256975", "0.5480999", "0.5478233", "0.54646796", "0.546352", "0.54448324", "0.5442682", "0.5429706", "0.54236245", "0.54232883", "0.54181314", "0.54176104", "0.5417287", "0.5413836", "0.5408328", "0.53983355", "0.53578293", "0.53519344", "0.5348229", "0.534499", "0.5339496", "0.53341174", "0.53330547", "0.5325454", "0.5305228", "0.52987915", "0.5298618", "0.5295663", "0.52848953", "0.5284588", "0.5279334", "0.5257715", "0.5254694", "0.5239386", "0.5226055", "0.5212542", "0.5212093", "0.5210101", "0.52074707", "0.5206228", "0.5189406", "0.5187718", "0.51750964", "0.5174233", "0.51690143", "0.5166648", "0.51663303", "0.5165636", "0.51600724", "0.51596093", "0.51566917", "0.5152906", "0.5151586", "0.51513857", "0.5143696", "0.51405907", "0.51388437", "0.51355094", "0.51348346", "0.5133335", "0.51250345", "0.5122582", "0.51183724", "0.51176697", "0.5115295", "0.51101404" ]
0.58353925
12
Construct the unnormalized (combinatorial) Laplacian matrix.
def laplacian(degree_vector, weight_matrix):
    return np.diag(degree_vector) - weight_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )", "def LaplacianMatrix(adjmatrix):\n if adjmatrix.dtype in [np.uint, np.uint0, np.uint8, np.uint16, np.uint32, np.uint64]:\n adjmatrix = adjmatrix.astype(int)\n N = len(adjmatrix)\n\n laplacianmatrix = np.identity(N, dtype=adjmatrix.dtype) * adjmatrix.sum(axis=1)\n laplacianmatrix -= adjmatrix\n\n return laplacianmatrix", "def one_dim_sparse_laplacian(m: int):\n return sp.diags([1.0, -2.0, 1.0], [-1, 0, 1], dtype='float64', shape=(m, m), format='lil')", "def rebuild_the_laplacians():\n local_matrix = InteractomeInterface()\n local_matrix.full_rebuild()\n\n annot_matrix = AnnotomeInterface()\n annot_matrix.full_rebuild()", "def get_laplacian(adjacency: sparse.csr_matrix) -> sparse.csr_matrix:\n weights = adjacency.dot(np.ones(adjacency.shape[0]))\n return sparse.diags(weights) - adjacency", "def generate_graph_laplacian(A):\r\n\r\n #Create symmetric matrix\r\n #A=0.5* (A+ A.T)\r\n \r\n #D is just the identity matrix (because sum(P)=1)\r\n Degree=np.sum(A,1)\r\n D=np.diag(Degree)\r\n \r\n #Laplacian matrix\r\n L=D-A\r\n return L", "def compute_matrix(self):\n\n fac = self.a / self.dx ** 2\n\n diagonal = np.ones(self.nx) * 2 * fac\n lower = np.ones(self.nx - 1) * -fac\n upper = np.ones(self.nx - 1) * -fac\n\n matrix = sp.diags(\n diagonals=[diagonal, lower, upper],\n offsets=[0, -1, 1], shape=(self.nx, self.nx),\n format='csr')\n\n return matrix", "def prepare_laplacian(laplacian):\n\n def estimate_lmax(laplacian, tol=5e-3):\n r\"\"\"Estimate the largest eigenvalue of an operator.\"\"\"\n lmax = sparse.linalg.eigsh(laplacian, k=1, tol=tol,\n ncv=min(laplacian.shape[0], 10),\n return_eigenvectors=False)\n lmax = lmax[0]\n lmax *= 1 + 2 * tol # Be robust to errors.\n return lmax\n\n def scale_operator(L, lmax, scale=1):\n r\"\"\"Scale the eigenvalues from [0, lmax] to [-scale, scale].\"\"\"\n I = sparse.identity(L.shape[0], format=L.format, dtype=L.dtype)\n L *= 2 * scale / lmax\n L -= I\n return L\n\n lmax = estimate_lmax(laplacian)\n laplacian = scale_operator(laplacian, lmax)\n\n laplacian = sparse.coo_matrix(laplacian)\n\n # PyTorch wants a LongTensor (int64) as indices (it'll otherwise convert).\n indices = np.empty((2, laplacian.nnz), dtype=np.int64)\n np.stack((laplacian.row, laplacian.col), axis=0, out=indices)\n indices = torch.from_numpy(indices)\n\n laplacian = torch.sparse_coo_tensor(indices, laplacian.data, laplacian.shape)\n laplacian = laplacian.coalesce() # More efficient subsequent operations.\n return laplacian", "def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))", "def compute_mesh_laplacian(mesh, weights=None, fem_b=None, lap_type=\"conformal\"):\n print(\" Computing Laplacian\")\n if weights is None:\n (weights, fem_b) = compute_mesh_weights(mesh, weight_type=lap_type)\n\n if lap_type == \"fem\":\n weights.data = weights.data / 2\n\n N = weights.shape[0]\n sB = fem_b.sum(axis=0)\n diaB = sparse.dia_matrix((sB, 0), shape=(N, N))\n B = sparse.lil_matrix(diaB + fem_b)\n s = weights.sum(axis=0)\n dia = sparse.dia_matrix((s, 0), shape=(N, N))\n L = sparse.lil_matrix(dia - weights)\n\n # if symmetrize == 1 & & normalize == 0\n # L = diag(sum(W, 2)) - W;\n # elseif\n # symmetrize == 1 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). 
^ (-1 / 2)) * W * diag(\n # sum(W, 2). ^ (-1 / 2));\n # elseif\n # symmetrize == 0 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). ^ (-1)) * W;\n\n li = np.hstack(L.data)\n print(\" -nb Nan in Laplacian : \", len(np.where(np.isnan(li))[0]))\n print(\" -nb Inf in Laplacian : \", len(np.where(np.isinf(li))[0]))\n\n return L, B", "def uniform_laplacian(image, radius=1):\n height, width = image.shape[:2]\n window_size = 2 * radius + 1\n\n W = sparse_conv_matrix(width, height, np.ones((window_size, window_size)))\n\n return weights_to_laplacian(W)", "def nodalLaplacian(self):\n if getattr(self, '_nodalLaplacian', None) is None:\n print('Warning: Laplacian has not been tested rigorously.')\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n D1 = sdiag(1./self.hx) * ddx(self.nCx)\n L = - D1.T*D1\n elif(self.dim == 2):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n L1 = sp.kron(speye(n[1]+1), - D1.T * D1)\n L2 = sp.kron(- D2.T * D2, speye(n[0]+1))\n L = L1 + L2\n elif(self.dim == 3):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n D3 = sdiag(1./self.hz) * ddx(n[2])\n L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)\n L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))\n L3 = kron3(- D3.T * D3, speye(n[1]+1), speye(n[0]+1))\n L = L1 + L2 + L3\n self._nodalLaplacian = L\n return self._nodalLaplacian", "def build_laplacian_over_ordinal_integer_actions(\n action_spec: types.BoundedTensorSpec,\n) -> types.Tensor:\n num_actions = policy_utilities.get_num_actions_from_tensor_spec(action_spec)\n adjacency_matrix = np.zeros([num_actions, num_actions])\n for i in range(num_actions - 1):\n adjacency_matrix[i, i + 1] = 1.0\n adjacency_matrix[i + 1, i] = 1.0\n laplacian_matrix = (\n np.diag(np.sum(adjacency_matrix, axis=0)) - adjacency_matrix\n )\n return laplacian_matrix", "def normalized_laplacian(degree_vector, weight_matrix, length):\n holders = np.zeros((length, 1))\n holders[:, 0] = 1 / degree_vector\n\n return np.eye(length) - holders * weight_matrix", "def decomposeLU(self):\n self.check_square()\n\n N = self.rows\n L = make_matrix(N, N)\n U = make_matrix(N, N)\n A = self #for more math friendly notation\n\n\n for j in range(N):\n L[j, j] = 1.0 #Doolittle factorization\n\n #e.g., if you are in column = 5, you go down 6 rows\n for i in range(j+1):\n U[i, j] = A[i, j] - sum(L[i, k] * U[k, j] for k in range(i))\n #e.g., if you are in column = 5,\n # you start at row 5 and go down for the lower triangular matrix\n for i in range(j, N):\n L[i, j] = (A[i, j] - sum(L[i, k] * U[k, j] for k in range(j))) / U[j, j]\n\n self.L = L\n self.U = U\n return L, U", "def calculate_normalized_laplacian(adj):\n adj = sp.coo_matrix(adj)\n d = np.array(adj.sum(1))\n d_inv_sqrt = np.power(d, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return normalized_laplacian", "def calculate_normalized_laplacian(adj):\n adj = sp.coo_matrix(adj)\n d = np.array(adj.sum(1))\n d_inv_sqrt = np.power(d, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return normalized_laplacian", "def lap_mat(self):", "def L1U(A, d):\n \n\n n, _ = A.shape\n L = np.eye(n, n, dtype=A.dtype)\n U = 
np.zeros((n, n), dtype=A.dtype)\n\n U[0, 0] = A[0, 0]\n for k in range(1, n):\n km = max(0, k-d)\n L[k, km : k] = np.transpose(rforwardsolve(np.transpose(U[km:k, km:k]),\\\n np.transpose(A[k, km:k]), d))\n U[km:k+1, k] = rforwardsolve(L[km:k+1, km:k+1], A[km:k+1, k], d)\n return L, U", "def make_matrix(self):\n self.leftmost_element()\n self.rightmost_element()\n self.interior_element()\n\n #Transforms all sympy symbolic expressions for the lagrange polynomials into callable functions.\n self.psi_funcs = [sym.lambdify([self.x], self.psi[i], modules = \"numpy\") for i in range(3*self.Ne)]", "def laplacian(A):\n #calculate D by creating a diagonal matrix with the column sum of A\n D = np.diag(A.sum(axis=0))\n return D - A", "def init_needleman_wunsch_matrix(self):\r\n empty_matrix = self.empty_matrix() # Building on the previous definition, this will give you an empty matrix\r\n for i in range(len(self.s2)+1):\r\n for j in range(len(self.s1)+1):\r\n empty_matrix[0][i] = -i\r\n empty_matrix[j][0] = -j\r\n return empty_matrix", "def laplacian_matrix(A, normalized=False):\n n, m = A.shape\n D = degree_matrix(A)\n L = D - A\n if normalized:\n degs = _flat(A.sum(axis=1))\n rootD = sps.spdiags(np.power(degs, -1 / 2), [0], n, n, format=\"csr\")\n L = rootD * L * rootD\n return L", "def LUdecomp(Ainput):\n\n n, m = np.shape(Ainput)\n \n if n != m:\n return 'Error: Please enter an invertible matrix.'\n \n U = Ainput.copy() # make copies so as not to write over originals\n L = np.zeros((np.shape(Ainput)))\n \n for i in range(0,n):\n L[i,i] = 1\n for i in range(0,n-1): # loop over pivot rows from row 1 to row n-1 (i to n-2)\n for j in range(i+1,n): # loop over row to be zero'ed from row j+1 to n (j+1 to n-1)\n c = U[j,i]/U[i,i] # multiplicative factor to zero point\n L[j,i] = c\n U[j,i] = 0.0 # we know this element goes to zero\n U[j,i+1:n]=U[j,i+1:n]-c*U[i,i+1:n] # do subtraction of two rows\n\n return (L,U) # return lower and upper decompositions", "def offDiagPairs(self):\n return np.transpose(np.nonzero(np.triu(self.LaplacianMatrix,k=2)))", "def expansion_matrix_dl(self):\n\n row = self._base_nlp._lower_d_map\n nnz = len(self._base_nlp._lower_d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.nd, nnz))", "def decompose_to_LU(a):\n # create emtpy LU-matrix\n lu_matrix = np.matrix(np.zeros([a.shape[0], a.shape[1]]))\n n = a.shape[0]\n\n for k in range(n):\n # calculate all residual k-row elements\n for j in range(k, n):\n lu_matrix[k, j] = a[k, j] - lu_matrix[k, :k] * lu_matrix[:k, j]\n # calculate all residual k-column elemetns\n for i in range(k + 1, n):\n lu_matrix[i, k] = (a[i, k] - lu_matrix[i, : k] * lu_matrix[: k, k]) / lu_matrix[k, k]\n\n return lu_matrix", "def laplacian(A):\n D = np.diag(np.sum(A, axis=1))\n return D - A", "def laplacian(mesh):\n faces = np.array(mesh.triangles)\n N = np.array(mesh.vertices).shape[0]\n A = np.zeros((N, N))\n for i in range(3):\n for j in range(3):\n if i == j:\n continue\n A[(faces[:, i], faces[:, j])] = 1.0\n A = A + A.T\n diag = A.dot(np.ones(N))\n L = np.diag(diag) - A\n return L", "def getNormLaplacian(W):\n\td=[np.sum(row) for row in W]\n\tD=np.diag(d)\n\tL=D-W\n\t#Dn=D^(-1/2)\n\tDn=np.power(np.linalg.matrix_power(D,-1),0.5)\n\tLbar=np.dot(np.dot(Dn,L),Dn)\n\treturn Lbar", "def _set_diag(laplacian, value, norm_laplacian):\n n_nodes = laplacian.shape[0]\n # We need all entries in the diagonal to values\n if not sparse.isspmatrix(laplacian):\n if norm_laplacian:\n laplacian.flat[::n_nodes 
+ 1] = value\n else:\n laplacian = laplacian.tocoo()\n if norm_laplacian:\n diag_idx = (laplacian.row == laplacian.col)\n laplacian.data[diag_idx] = value\n # If the matrix has a small number of diagonals (as in the\n # case of structured matrices coming from images), the\n # dia format might be best suited for matvec products:\n n_diags = np.unique(laplacian.row - laplacian.col).size\n if n_diags <= 7:\n # 3 or less outer diagonals on each side\n laplacian = laplacian.todia()\n else:\n # csr has the fastest matvec and is thus best suited to\n # arpack\n laplacian = laplacian.tocsr()\n return laplacian", "def make_mat_cp_le(cons_pot_mesh, lin_geo_mesh):\n pot_faces = cons_pot_mesh.get_faces()\n assert pot_faces.shape[0] == lin_geo_mesh.get_faces().shape[0]\n num_faces = pot_faces.shape[0]\n K = np.zeros((3 * num_faces, 3 * num_faces))\n add_cp_le_DL_terms(K, cons_pot_mesh, lin_geo_mesh)\n add_cp_le_RBM_terms(K, cons_pot_mesh, lin_geo_mesh)\n return K", "def get_correct_distance_matrix(L):\n n = len(L)\n D = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i != j:\n D[i][j] = get_minor(L, [i, j], [i, j]) / get_minor(L, [i], [i])\n return D", "def _r_matrix_xxz(self, root):\n r_matrix = np.eye(4, dtype=np.complex128)\n if self.delta == 1:\n b = (root - 1j) / (root + 1j)\n c = 2j / (root + 1j)\n\n elif self.delta > 1:\n gamma = np.arccosh(self.delta)\n b = np.sin(gamma / 2 * (root - 1j)) / np.sin(gamma / 2 * (root + 1j))\n c = 1j * np.sinh(gamma) / np.sin(gamma / 2 * (root + 1j))\n else:\n gamma = np.arccos(self.delta)\n b = np.sinh(gamma / 2 * (root - 1j)) / np.sinh(gamma / 2 * (root + 1j))\n c = 1j * np.sin(gamma) / np.sinh(gamma / 2 * (root + 1j))\n r_matrix[1, 1] = r_matrix[2, 2] = c\n r_matrix[1, 2] = r_matrix[2, 1] = b\n return r_matrix", "def _init_transformation_matrix(self):\n # Set up basic transformation matrix\n c_transform = np.zeros((self.n_beads, self.n_beads))\n\n # Get auxiliary array with bead indices\n n = np.arange(1, self.n_beads + 1)\n\n # for k = 0\n c_transform[0, :] = 1.0\n\n for k in range(1, self.n_beads // 2 + 1):\n c_transform[k, :] = np.sqrt(2) * np.cos(2 * np.pi * k * n / self.n_beads)\n\n for k in range(self.n_beads // 2 + 1, self.n_beads):\n c_transform[k, :] = np.sqrt(2) * np.sin(2 * np.pi * k * n / self.n_beads)\n\n if self.n_beads % 2 == 0:\n c_transform[self.n_beads // 2, :] = (-1) ** n\n\n # Since matrix is initialized as C(k,n) does not need to be transposed\n c_transform /= np.sqrt(self.n_beads)\n c_transform = torch.from_numpy(c_transform)\n\n return c_transform", "def L1U(A, d):\n n = shape(A)[0]\n L = eye(n)\n U = matrix(zeros((n,n))); U[0,0] = A[0,0]\n for k in range(1,n):\n km = array([0, k - d]).max()\n if km < k:\n L[k, km:k] = A[k, km:k]\n rforwardsolve(U[km:k, km:k].T, L[k, km:k].T, d) # L\n U[km:(k + 1), k] = A[km:(k + 1), k]\n rforwardsolve(L[km:(k + 1), km:(k + 1)], U[km:(k + 1), k], d) # U\n return L, U", "def laplacian(W, normalized=False):\r\n # Degree matrix.\r\n d = W.sum(axis=0)\r\n # Laplacian matrix.\r\n if not normalized:\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n L = D - W\r\n else:\r\n # d += np.spacing(np.array(0, W.dtype))\r\n d = 1 / np.sqrt(d)\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\r\n L = I - D * W * D\r\n\r\n # assert np.abs(L - L.T).mean() < 1e-9\r\n assert type(L) is scipy.sparse.csr.csr_matrix\r\n return L", "def bloch_matrix(self):\n if self.gf_r is None:\n self.gf()\n\n return -self.gf_r.dot(self.lead[1])", "def prepotential(G):\n L = 
nx.laplacian_matrix(G).toarray()\n L_tild = L[1:,1:]\n T_tild = np.linalg.inv(L_tild)\n T = np.zeros(L.shape)\n T[1:,1:] = T_tild\n return T", "def _buildMatrix(self, SparseMatrix, Ncells, MaxFaces, coeff):\n return (0, 0)", "def CreateMatrix(self) -> BaseMatrix:", "def CreateMatrix(self) -> BaseMatrix:", "def LU(A):\n m, n = A.shape\n L, U = np.zeros([m, n]), np.zeros([m, n])\n for i in range(n):\n L[i][i] = 1\n\n for i in range(n):\n\n # Upper triangular matrix\n for j in range(i, n):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*U[k][j]\n U[i][j] = A[i][j] - summ\n\n # Lower triangular matrix\n for j in range(i+1, n):\n summ = 0\n for k in range(0, i):\n summ += L[j][k]*U[k][i]\n L[j][i] = (A[j][i] - summ)/U[i][i]\n return L, U", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... 
= b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b", "def laplace_matrix(self):\n n = self.number_of_vertices\n laplace_matrix = np.zeros((n, n))\n for i in range(n):\n laplace_matrix[i][i] = 1\n vertice = self.list_of_vertices[i]\n for edge in vertice.edges_list:\n laplace_matrix[i][edge.linked[1].index] = 1\n return laplace_matrix", "def cal_L(self):\n # calculate the l matrix\n self.point_matrixs = self.point_matrix.reshape(\n self.point_matrix.shape[0], 1, self.point_matrix.shape[1])\n self.point_matrixs = np.tile(self.point_matrixs,\n (self.attach_points.shape[0], 1))\n self.attach_points_matrix = np.matlib.repmat(\n self.attach_points[:, 0:3], self.point_matrix.shape[0], 1)\n self.attach_points_matrix = self.attach_points_matrix.reshape(\n self.point_matrix.shape[0], self.attach_points.shape[0], 3)\n self.L = np.subtract(self.attach_points_matrix,\n self.point_matrixs)\n # self.L[:,self.attach_points[:,3]==1,:] = \\\n # - self.L[:,self.attach_points[:,3]==1,:]\n # print(self.L)", "def relaxation_matrix(self, uphill, downhill):\n world.KK = numpy.zeros((2,2), dtype=numpy.float64)\n Kup = 1.0/float(uphill)\n world.KK[0,0] = -Kup\n world.KK[1,0] = Kup\n Kdn = 1.0/float(downhill)\n world.KK[0,1] = Kdn\n world.KK[1,1] = -Kdn", "def lu_decomposition(self):\n if self.rows_count() != self.columns_count():\n raise ValueError(\"Matrix needs to me square for LU decomposition.\")\n for i in range(self.rows_count() - 1):\n for j in range(i + 1, self.rows_count()):\n if self[i, i] == 0: # or abs(self[i, i]) <= 0.000001):\n raise ValueError(\"Can't divide by 0\")\n self[j, i] = self[j, i] / self[i, i]\n for k in range(i + 1, self.rows_count()):\n self[j, k] -= self[j, i] * self[i, k]", "def laplacian(W, normalized=True):\n\n # Degree matrix.\n d = W.sum(dim=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D 
- W\n else:\n # d += np.spacing(np.array(0, W.dtype))\n d = 1 / torch.sqrt(d)\n D = torch.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L", "def construct_matrix_A(n):\n n2 = n**2\n D0 = 4*np.ones(n2) # 0th diagonal\n D1 = - np.ones(n2 - 1) # -1st, 1st diagonals\n D1[n-1::n] = 0 # Setting every k*n-1 entries = 0 for k < n\n DN = - np.ones(n2 - n) # -nth, nth diagonals\n return scipy.sparse.diags((DN, D1, D0, D1, DN), (-n, -1, 0, 1, n),\n shape=(n2, n2), format=\"csr\")", "def buildLaplacianPyramid(guassianPyramid):\n pyramid = []\n for i in range(len(guassianPyramid)-1):\n Gi = upsample(guassianPyramid[i+1])\n G = guassianPyramid[i]\n r, c = G.shape[:2]\n L = G - Gi[:r, :c]\n pyramid.append(L)\n\n pyramid.append(guassianPyramid[-1])\n return pyramid", "def symmetrized_normalized_laplacian(degree_vector, weight_matrix, length):\n holders = np.zeros((length, 1))\n holders[:, 0] = np.sqrt(1 / degree_vector)\n\n return np.eye(length) - holders * weight_matrix * holders.T", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def laplacian2(A, laplacian_type='raw'):\r\n\r\n N = A.shape[0]\r\n # TODO: Raise exception if A is not square\r\n\r\n degrees = A.sum(1)\r\n # To deal with loops, must extract diagonal part of A\r\n diagw = np.diag(A)\r\n\r\n # w will consist of non-diagonal entries only\r\n ni2, nj2 = A.nonzero()\r\n w2 = A[ni2, nj2]\r\n ndind = (ni2 != nj2).nonzero() # Non-diagonal indices\r\n ni = ni2[ndind]\r\n nj = nj2[ndind]\r\n w = 
w2[ndind]\r\n\r\n di = np.arange(N) # diagonal indices\r\n\r\n if laplacian_type == 'raw':\r\n # non-normalized laplaciand L = D - A\r\n L = np.diag(degrees - diagw)\r\n L[ni, nj] = -w\r\n L = lil_matrix(L)\r\n elif laplacian_type == 'normalized':\r\n # TODO: Implement the normalized laplacian case\r\n # % normalized laplacian D^(-1/2)*(D-A)*D^(-1/2)\r\n # % diagonal entries\r\n # dL=(1-diagw./degrees); % will produce NaN for degrees==0 locations\r\n # dL(degrees==0)=0;% which will be fixed here\r\n # % nondiagonal entries\r\n # ndL=-w./vec( sqrt(degrees(ni).*degrees(nj)) );\r\n # L=sparse([ni;di],[nj;di],[ndL;dL],N,N);\r\n print(\"Not implemented\")\r\n else:\r\n # TODO: Raise an exception\r\n print(\"Don't know what to do\")\r\n\r\n return L", "def get_l(m):\n L = m.copy()\n for i in range(L.shape[0]):\n L[i, i] = 1\n L[i, i+1:] = 0\n return np.matrix(L)", "def lup_decomposition(self):\n p = [i for i in range(self.rows_count())]\n for i in range(self.rows_count() - 1):\n pivot = i\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[j], i]) > abs(self[p[pivot], i]):\n pivot = j\n p[pivot], p[i] = p[i], p[pivot]\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[i], i]) < math.pow(10, -6):\n raise ValueError(\"Can't divide by 0\")\n self[p[j], i] /= self[p[i], i]\n for k in range(i + 1, self.rows_count()):\n self[p[j], k] -= self[p[j], i] * self[p[i], k]\n lst = []\n for i in p:\n lst.append(self.value[i])\n return p, Matrix(lst)", "def lizardite():\n\n rho = 2610.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 245.; C[0,1] = 50.; C[0,2] = 31.; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 245.; C[1,2] = 31.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 23.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 11.6; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 11.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 97.5\n\n return C, rho", "def get_feature_oriented_matrix(self):\n nbr_features = self.hyperparameters.time_series_depth\n matrix = np.ones(shape=(nbr_features, nbr_features), dtype=np.float)\n np.fill_diagonal(matrix, val=0)\n return matrix", "def lform(self):\n a, c, d, b = self.to_ccw()\n if b < c:\n a += b\n b -= b\n c -= b\n d += b\n else:\n a += c\n b -= c\n c -= c\n d += c\n return self.__class__.from_ccw(a, c, d, b)", "def build_lower_zeros(self):\r\n for row in range(self.SIZE - 1, 0, -1):\r\n self.__obtain_zero(row, 0, 0)\r\n\r\n for col in range(1, self.SIZE - 1):\r\n for row in range(self.SIZE - 1, col, -1):\r\n self.__obtain_zero(row, col, row - 1)", "def lawsonite():\n\n rho = 3090.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 214.; C[0,1] = 69.; C[0,2] = 82.; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 226.; C[1,2] = 65.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 259.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 60.; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 17.\n\n return C, rho", "def lMatrix(a, b, c, alpha, beta, gamma, convention=None):\n lMatrix = np.zeros((3, 3))\n\n cosAlpha = np.cos(alpha)\n cosBeta = np.cos(beta)\n cosGamma = 
np.cos(gamma)\n\n sinGamma = np.sin(gamma)\n\n lMatrix[0, 0] = a\n lMatrix[0, 1] = b * cosGamma\n lMatrix[0, 2] = c * cosBeta\n\n lMatrix[1, 1] = b * sinGamma\n lMatrix[1, 2] = c * (cosAlpha - cosBeta * cosGamma) / sinGamma\n\n lMatrix[2, 2] = c * np.sqrt(\n 1 + 2 * cosAlpha * cosBeta * cosGamma -\n cosAlpha**2 - cosBeta**2 - cosGamma**2\n ) / sinGamma\n\n # OI/HKL convention - x // [10-10], y // a2 [-12-10]\n # TSL convention - x // a1 [2-1-10], y // [01-10]\n if convention is None:\n convention = defaults['crystal_ortho_conv']\n\n if convention.lower() in ['hkl', 'oi']:\n # Swap 00 with 11 and 01 with 10 due to how OI orthonormalises\n # From Brad Wynne\n t1 = lMatrix[0, 0]\n t2 = lMatrix[1, 0]\n\n lMatrix[0, 0] = lMatrix[1, 1]\n lMatrix[1, 0] = lMatrix[0, 1]\n\n lMatrix[1, 1] = t1\n lMatrix[0, 1] = t2\n\n elif convention.lower() != 'tsl':\n raise ValueError(\n f\"Unknown convention '{convention}' for orthonormalisation of \"\n f\"crystal structure, can be 'hkl' or 'tsl'\"\n )\n\n # Set small components to 0\n lMatrix[np.abs(lMatrix) < 1e-10] = 0\n\n return lMatrix", "def matrix(self, full=False, keeppads=True):\n\n v = np.fft.hfft(self._u, n=self.N) / self.N\n idx = sum(np.ogrid[0:self.N, -self.N:0])\n C = v[idx] # follow scipy.linalg.{circulant,toeplitz,hankel}\n\n if keeppads:\n a = self._yfac_.copy()\n b = self._xfac_.copy()\n else:\n a = self.yfac.copy()\n b = self.xfac.copy()\n C = self._unpad(C, 0, True)\n C = self._unpad(C, 1, False)\n a = a.reshape(-1, 1)\n\n if not full:\n return a, b, C\n else:\n return a * C * b", "def reconstructFromLaplacianPyramid(pyramid):\n \n nLevels = len(pyramid)\n out = pyramid[-1]\n if len(pyramid) == 1:\n return out\n\n useStack = False\n if pyramid[0].shape[0:2] == pyramid[-1].shape[0:2]:\n useStack = True\n\n dtp = out.dtype\n for i in range(nLevels-2,-1,-1):\n newSz = pyramid[i].shape[0:2]\n if useStack:\n up = out\n else:\n up = cv2.pyrUp(out,dstsize=(newSz[1],newSz[0]))\n if len(up.shape) < 3:\n up.shape += (1,)\n out = up + pyramid[i]\n out = out.astype(dtp)\n\n return out", "def _set_ls_matrices(self):\n zz_t = self.z_matrix * self.z_matrix.transpose()\n l, s, l_t = np.linalg.svd(zz_t)\n s[self.p:] = 0\n self.l_matrix = np.matrix(l)\n self.s_matirx = np.matrix(np.diag(s))", "def mult_L(self) -> np.ndarray:\n return np.array([\n [self.w, -self.x, -self.y, -self.z],\n [self.x, self.w, -self.z, self.y],\n [self.y, self.z, self.w, -self.x],\n [self.z, -self.y, self.x, self.w]])", "def mk_single_diffy():\n # make matrix:\n mat = zeros((M, M), dtype='d')\n for m in range(M):\n for p in range(m+1, M, 2):\n mat[m,p] = 2*p*oneOverC[m]\n\n return mat", "def matrices(self):\n # Creating L\n L = scipy.sparse.diags((self.inv_dx2, -2*self.inv_dx2, self.inv_dx2, 1),\n (-(self.N+1), -self.N, -(self.N-1), self.N),\n shape=(2*self.N, 2*self.N), dtype=np.complex128)\n self.L = scipy.sparse.csr_matrix(L)\n self.L[-(self.N+1), 0], self.L[-1, -self.N] = 0, 0\n\n # Computing largest eigenvalue of L explicitely:\n self.mu_max = self.inv_dx*np.sqrt(2*(1 + np.cos(np.pi/(self.N+1))))\n\n # Creating K\n self.K = scipy.sparse.diags((-self.inv_dx2, 2*self.inv_dx2, -self.inv_dx2),\n (-1, 0, 1), # Diagonals\n shape=(self.N, self.N), # Size of matrix\n dtype=np.complex128)", "def get_incorrect_distance_matrix(L):\n n = len(L)\n D = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i != j:\n D[i][j] = get_minor(L, [i], [j]) / get_minor(L, [i], [i])\n return D", "def get_L(self, tolerance=None):\r\n index = self.data.index\r\n columns = self.data.columns\r\n\r\n 
# Obtain the eigenvalues and eigenvectors\r\n E, V = sandy.CategoryCov(self.data).get_eig(tolerance=tolerance)\r\n\r\n # need sparse because much faster for large matrices (2kx2k from J33 Pu9)\r\n # with a lot of zero eigs\r\n # this is exactly equivalent to V.values @ np.diag(np.sqrt(E.values))\r\n A = (sparse.csr_matrix(V.values) @ sparse.csr_matrix(np.diag(np.sqrt(E.values)))).todense()\r\n \r\n# u, s, vh = self.get_svd()\r\n# A = (sparse.csr_matrix(u) @ sparse.csr_matrix(np.diag(np.sqrt(s)))).todense()\r\n\r\n # QR decomposition\r\n Q, R = scipy.linalg.qr(A.T)\r\n L = R.T\r\n\r\n return pd.DataFrame(L, index=index, columns=columns)", "def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho", "def build_matrix(self, rawdata):\n\t\tN = len(rawdata)\n\t\ts = set()\n\t\tfor d in rawdata:\n\t\t\ts.update(d[7])\n\t\tK = len(s)\n\t\tself.I = dict((j, i) for (i, j) in enumerate(s))\n\t\tself.I_rev = dict((i, j) for (i, j) in enumerate(s))\n\t\tD = sp.dok_matrix((N, K))\n\t\tself.t = np.zeros(N)\n\t\tprint \"Empty %i x %i matrix created\" % (N, K)\n\t\t\n\t\tfor n,d in enumerate(rawdata):\n\t\t\tif len(d[7]) == 1:\n\t\t\t\tD[n,self.I[d[7][0]]] = 1# d[6] - d[5]\n\t\t\telse:\n\t\t\t\tD[n, self.I[d[7][0]]] = 1 # d[5]\n\t\t\t\tD[n, self.I[d[7][-1]]] = 1 # d[6]\n\t\t\t\tfor eid in d[7][1:-1]:\n\t\t\t\t\tD[n, self.I[eid]] = 1.0\n\t\t\tself.t[n] = float(d[2] - d[1])\n\t\tprint \"DOK matrix built.\"\n\t\t\n\t\tself.M = sp.csr_matrix(D)\n\t\tprint \"DOK to CSR conversion done.\"\n\t\tself.N = N\n\t\tself.K = K", "def _prepare_outer_matrix(self):\n self._mat_plane = numpy.array([\n self._scaling[0], 0, 0, 0,\n 0, self._scaling[1], 0, 0,\n 0, 0, 1, 0,\n self.i_border[0], -self.i_border[1], 0, 1\n ], dtype=numpy.float32)", "def generate_two_dim_step_matrices(self):\n Lap_h = two_dim_sparse_neumann_laplacian(self.M, format='csc')\n I_m = sp.identity(self.M * self.M, dtype='float64', format='csc')\n\n I_minus_Lap = spla.factorized(I_m - (self.r / 2.0) * Lap_h)\n I_plus_Lap = I_m + (self.r / 2.0) * Lap_h\n\n return I_minus_Lap, I_plus_Lap", "def delaunay_triangulate(P: np.ndarray):\n n = P.shape[0]\n if n < 3:\n A = np.ones((n, n)) - np.eye(n)\n else:\n try:\n d = Delaunay(P)\n A = np.zeros((n, n))\n for simplex in d.simplices:\n for pair in itertools.permutations(simplex, 2):\n A[pair] = 1\n except QhullError as err:\n print(\"Delaunay triangulation error detected. 
Return fully-connected graph.\")\n print(\"Traceback:\")\n print(err)\n A = np.ones((n, n)) - np.eye(n)\n return A", "def make_lcp(A,F=0.25):\n\n # [x, b] = MAKE_LCP(A): Make LCP\n #\n # INPUT:\n #\n # A -- The coefficient matrix in the LCP\n # F -- The fraction of zero-values in the x-solution.\n #\n # OUTPUT:\n #\n # x -- A solution for the LCP problem.\n # b -- The right hand side vector\n #\n # Port of Kenny Erleben, DIKU 2011 Matlab code to Python\n\n ##### Get of number of variable ##########\n N = np.size(A,0) # Pick a dimension, it should be NxN\n\n ##### Generate a random LCP solution ##########\n x = np.random.uniform(0,1,(N,1))\n x[x < F] = 0\n\n # x = np.real_if_close(x)\n\n ##### Generate a right-hand-side vector that is ##########\n ##### consistent with a random solution ##########\n b = np.zeros((N,1))\n s = np.real_if_close(np.dot(A,x))\n b[x>0] = -s[x>0]\n\n return (x, b)", "def _build_laplacian_pyramid(im, max_levels, filter_size):\n filter_vec = _gaussian_kernel_1d(filter_size).reshape(filter_size, 1)\n pyr = []\n im_curr = im\n im_next = _reduce(im, filter_vec)\n while _image_is_large_enough(im_next) and len(pyr) < max_levels - 1:\n pyr.append(im_curr - _expand(im_next, filter_vec))\n im_curr = im_next\n im_next = _reduce(im_curr, filter_vec)\n\n pyr.append(im_curr)\n\n return pyr, filter_vec.reshape(1, filter_size)", "def mat_from_diag_triu_tril(diag, tri_upp, tri_low):\n n = diag.shape[-1]\n (i,) = diag_indices(n, ndim=1)\n j, k = triu_indices(n, k=1)\n mat = _torch.zeros((diag.shape + (n,)))\n mat[..., i, i] = diag\n mat[..., j, k] = tri_upp\n mat[..., k, j] = tri_low\n return mat", "def cholesky(A):\n n = len(A)\n\n # Create zero matrix for L\n L=np.zeros((n,n))\n\n # Perform the Cholesky decomposition\n for i in range(n):\n for k in range(i+1):\n tmp_sum = sum(L[i][j] * L[k][j] for j in xrange(k))\n \n if (i == k): # Diagonal elements\n\n L[i][k] = math.sqrt(A[i][i] - tmp_sum)\n else:\n\n L[i][k] = (1.0 / L[k][k] * (A[i][k] - tmp_sum))\n return L", "def tridiag_matrix(bc_surface_type, upsilon, space_divisions, dx, k, T, h, hc, emissivity, sigma):\n # create tri-diagonal matrix\n A = np.diagflat([-upsilon for i in range(space_divisions - 1)], -1) +\\\n np.diagflat([1 + 2 * upsilon for i in range(space_divisions)]) +\\\n np.diagflat([-upsilon for i in range(space_divisions - 1)], 1)\n\n # adjust matrix depending on the boundary condition at the exposed surface\n if bc_surface_type == \"linear\":\n A[0,0] = 1 + 2*upsilon + 2*upsilon*dx*h/k\n A[0,1] = -2*upsilon\n \n elif bc_surface_type == \"non-linear\":\n A[0,0] = 1 + 2*upsilon + 2*dx*hc*upsilon/k+ 8*emissivity*sigma*dx*upsilon*T[0]**3/k\n A[0,1] = -2*upsilon\n \n # adjust matrix for the back boundary conditions\n A[-1, -2] = - 2 * upsilon\n A[-1, -1] = 1 + 2 * upsilon\n\n return A", "def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already computed\r\n return self.laplace_op*array_in", "def build_linear_diags(self):\n N = self.N\n dx = self.dx\n j = self._j # Index of the mid-point\n\n diags = np.zeros((2*self._j+1, self.N))\n\n # Advection term\n cff1 = -1/(2*dx)\n\n # Need to stagger these diagonals so lower and upper bands are symmetric\n diags[j-1, :-2] += -1*cff1*self.c[2:]\n diags[j+1, :] += 1*cff1*self.c[:]\n\n # Sponge term\n x = np.arange(0,N*dx,dx)\n rdist = x[-1] - x # Distance from right boundary\n spongefac = -np.exp(-6*rdist/self.spongedist)/self.spongetime\n diags[j,:] += spongefac \n\n return diags", "def make_mat_lp_le(lin_pot_mesh, lin_geo_mesh):\n num_nodes = 
lin_pot_mesh.get_nodes().shape[0]\n K = np.zeros((3 * num_nodes, 3 * num_nodes))\n add_lp_le_DL_terms(K, lin_pot_mesh, lin_geo_mesh)\n add_lp_le_RBM_terms(K, lin_pot_mesh, lin_geo_mesh)\n return K", "def build_upper_zeros(self):\r\n for row in range(0, self.SIZE - 1):\r\n self.__obtain_zero(row, self.SIZE - 1, self.SIZE - 1)\r\n\r\n for col in range(self.SIZE - 2, 0, -1):\r\n for row in range(0, col):\r\n self.__obtain_zero(row, col, row + 1)", "def _build(self):\n ary = np.zeros( (3,3,3), float )\n ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.\n ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5\n ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5\n ary[2,2,0] = ary[2,2,1] = 0.25\n ary[2,2,2] = 0.5\n return ary", "def calc_Ls(self, x_surface, geom):\n\n return np.zeros((self.n_wl,))", "def _build_sparse_matrix(L):\n shape = L.shape\n i = torch.LongTensor(np.vstack((L.row, L.col)).astype(int))\n v = torch.FloatTensor(L.data)\n return torch.sparse.FloatTensor(i, v, torch.Size(shape))", "def build_mat(self):\n for row, s in enumerate(self.S):\n for col, t in enumerate(self.T):\n\n if self.symmetric and row > col:\n pass\n\n else:\n self.mat[row, col] = self.kernel(s, t, self.n)\n\n if self.symmetric:\n self.mat = self.symmetrize(self.mat)\n else:\n for idx, s in enumerate(self.S):\n self.test_normalization[idx] = self.kernel(s, s, self.n)", "def make_state_appliable(state):\n size = len(state)\n st_appl = np.zeros((size, size), dtype=complex)\n for p1 in range(size):\n for p2 in range(size):\n st_appl[p1, p2] = state[p1, p2] * sqrt(factorial(p1) * factorial(p2))\n return st_appl", "def test_LU(self):\n A = np.random.rand(10, 10)\n MA = to_matrix(A)\n ML, MU = MA.decomposeLU()\n self.assertEqual(ML*MU, MA)\n self.assertTrue(ML.is_lower_triangular())\n self.assertTrue(MU.is_upper_triangular())", "def _makeDerivativeMatrix(self, index, a):\n num_neurons = a\n jaccob_matrix = np.zeros(shape=(num_neurons, num_neurons)) # ie S=3, shape 3X3\n # dx_func = self.__getDerivative(self.layers[index]['trans_func'])\n dx_func = self._getTransFunc(self.layers[index]['trans_func']).derivative\n for i in range(num_neurons):\n # diagonal matrix\n a_val = self.layers[index]['a_output'][i]\n jaccob_matrix[i][i] = dx_func(a_val)\n return jaccob_matrix", "def lcr_matrix(H):\n if H.ndim != 2 or H.shape[0] != H.shape[1]:\n raise ValueError('H should be a square matrix')\n\n leverages = sqrt(1-H.diagonal())\n leverages = leverages[:, None]\n R = (eye(len(H)) - H) / leverages\n return R - R.mean(0)", "def finite_difference(n):\n B=sparse.diags([1,-4,1],[-1,0,1],shape=(n,n))\n A=sparse.block_diag([B for i in xrange(0,n)])\n A.setdiag(1,k=-n)\n A.setdiag(1,k=n) \n b=[-100]+[0 for i in xrange(1,n-1)]+[-100]\n b=b*n\n return A,b", "def diag_to_matrix(l_and_u, diag):\n l, u = l_and_u\n dim = diag.shape[1]\n matrix = np.zeros((dim, dim))\n for i in range(l+u+1):\n np.fill_diagonal(\n matrix[max(0,i-u):,max(0,u-i):],\n diag[i,max(0,u-i):]\n )\n return matrix", "def build_mat(self,diag_entries=False):\n if self.L is None:\n raise ValueError('Must set number of spins (Operator.L) before building PETSc matrix.')\n\n self.destroy_mat()\n\n term_array = self.get_MSC()\n\n if diag_entries and not np.any(term_array['masks'] == 0):\n term_array = np.hstack([np.array([(0,0,0)],dtype=MSC_dtype),term_array])\n\n if not np.any(term_array['masks'] == 0):\n self._diag_entries = False\n else:\n self._diag_entries = True\n\n self._mat = build_mat(self.L,\n np.ascontiguousarray(term_array['masks']),\n 
np.ascontiguousarray(term_array['signs']),\n np.ascontiguousarray(term_array['coeffs']),\n bool(self.use_shell),\n self.use_shell == 'gpu')", "def lu(matrix):\n SIZE = matrix.shape[0]\n BS = np.BLOCKSIZE\n\n if matrix.shape[0] != matrix.shape[0]:\n raise Exception(\"LU only supports squared matricis\")\n if not matrix.dist():\n raise Exception(\"The matrix is not distributed\")\n\n if(SIZE % np.BLOCKSIZE != 0):\n raise Exception(\"The matrix dimensions must be divisible \"\\\n \"with np.BLOCKSIZE(%d)\"%np.BLOCKSIZE)\n\n (prow,pcol) = matrix.pgrid()\n A = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True);A += matrix\n L = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n U = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpL = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpU = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n for k in xrange(0,SIZE,BS):\n bs = min(BS,SIZE - k) #Current block size\n kb = k / BS # k as block index\n\n #Compute vertical multiplier\n slice = ((kb,kb+1),(kb,kb+1))\n for a,l,u in zip(A.blocks(slice), L.blocks(slice), U.blocks(slice)):\n (p,tl,tu) = linalg.lu(a)\n if not (np.diag(p) == 1).all():#We do not support pivoting\n raise Exception(\"Pivoting was needed!\")\n #There seems to be a transpose bug in SciPy's LU\n l[:] = tl.T\n u[:] = tu.T\n\n #Replicate diagonal block horizontal and vertical\n for tk in xrange(k+bs,SIZE,BS):\n tbs = min(BS,SIZE - tk) #Current block size\n L[tk:tk+tbs,k:k+bs] = U[k:k+tbs,k:k+bs]\n U[k:k+bs,tk:tk+tbs] = L[k:k+bs,k:k+tbs]\n\n if k+bs < SIZE:\n #Compute horizontal multiplier\n slice = ((kb,kb+1),(kb+1,SIZE/BS))\n for a,u in zip(A.blocks(slice), U.blocks(slice)):\n u[:] = np.linalg.solve(u.T,a.T).T\n\n #Compute vertical multiplier\n slice = ((kb+1,SIZE/BS),(kb,kb+1))\n for a,l in zip(A.blocks(slice), L.blocks(slice)):\n l[:] = np.linalg.solve(l,a)\n\n #Apply to remaining submatrix\n A -= pyHPC.summa(L[:,:k+bs],U[:k+bs,:], ao=(k+bs,k),\n bo=(k,k+bs), co=(k+bs,k+bs))\n\n return (L, U)", "def build_lower_zeros_with_pivoting(self):\r\n self.__apply_pivoting(0, self.SIZE - 1, 0)\r\n for row in range(self.SIZE - 1, 0, -1):\r\n self.__obtain_zero(row, 0, 0)\r\n\r\n for col in range(1, self.SIZE - 1):\r\n self.__apply_pivoting(col, self.SIZE - 1, col)\r\n for row in range(self.SIZE - 1, col, -1):\r\n self.__obtain_zero(row, col, row - 1)", "def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])", "def get_initial_condition_euler(self, tol=1e-10):\n Z = zeros((len(self._meshes), len(self._meshes[0].elements)+1))\n for mi, m in enumerate(self._meshes):\n if not m._left_lift:\n raise Exception(\"get_initial_condition_euler() only works if all boundary conditions are given on the left.\")\n\n Z[mi, 0] = m._left_value\n def get_F(Z, t):\n \"\"\"\n Evaluates the RHS for the vector Z and time tau.\n \"\"\"\n Z0 = zeros((len(self._meshes),))\n for mi, m in enumerate(self._meshes):\n Z0[mi] = self._F(mi, Z, t)\n return Z0\n def get_phi(Z, Zprev, tau, t):\n return Z - tau*get_F(Z, t) - Zprev\n def get_J(Z, tau, t):\n mat = eye(len(self._meshes))\n for i in range(len(self._meshes)):\n for j in range(len(self._meshes)):\n mat[i, j] += - tau*self._DFDY(i, j, Z, t)\n return mat\n\n # initial time and initial condition vector:\n tprev = self._meshes[0].elements[0].nodes[0].x\n Zprev = Z[:, 0].copy()\n Znext = Zprev[:].copy()\n for el_i in range(len(self._meshes[0].elements)):\n #print \"doing element:\", el_i\n tau = self._meshes[0].elements[el_i].length\n tnext = 
tprev + tau\n error = 1e10\n i = 0\n while error > tol:\n J = get_J(Zprev, tau, tprev)\n phi = get_phi(Znext, Zprev, tau, tprev)\n dZ = solve(J, -phi)\n Znext += dZ\n error_dZ = l2_norm(dZ)\n error_phi = l2_norm(get_phi(Znext, Zprev, tau, tnext))\n #print \"it=%d, l2_norm_dZ=%e, l2_norm_phi=%e\" % \\\n # (i, error_dZ, error_phi)\n error = max(error_dZ, error_phi)\n i += 1\n Z[:, el_i+1] = Znext[:].copy()\n Zprev = Znext[:].copy()\n tprev = tnext\n\n\n # now assign the Z to the vertex dofs and leave zeros in the bubbles\n Y = zeros((self.ndofs,))\n for mi, m in enumerate(self._meshes):\n coeffs_one_mesh = Z[mi, 1:]\n Y[m.dof_start:m.dof_start+len(coeffs_one_mesh)] = coeffs_one_mesh\n return Y", "def generate_LLL_matrix(self, matrix):\n LLL_matrix = matrix.transpose().LLL().transpose()\n return LLL_matrix", "def matrix_L1(l, omega, S, cn):\n zt = omega * S / cn['t']\n L = np.array((dN2(l, zt), dN4(l, zt)))\n return L.T" ]
[ "0.680923", "0.6638274", "0.62569284", "0.62069863", "0.61390823", "0.6101195", "0.59771115", "0.58065456", "0.5798872", "0.5760791", "0.5729921", "0.5715687", "0.56979316", "0.5615754", "0.55505806", "0.55295897", "0.55295897", "0.55002135", "0.5498201", "0.5496341", "0.54951996", "0.5462513", "0.5439882", "0.54309034", "0.5421717", "0.53955626", "0.53927463", "0.53541285", "0.5353893", "0.53453684", "0.5336856", "0.5329614", "0.5324905", "0.532192", "0.5315906", "0.531485", "0.5314092", "0.530122", "0.5266889", "0.5260048", "0.52495944", "0.52495944", "0.52469003", "0.5228986", "0.52238166", "0.5218845", "0.521041", "0.52009887", "0.5194418", "0.5179579", "0.5172927", "0.5172689", "0.5171359", "0.5164043", "0.51631415", "0.51553845", "0.51393247", "0.51349163", "0.513412", "0.51298815", "0.511768", "0.5107466", "0.5096872", "0.50947136", "0.50396925", "0.5038918", "0.50308394", "0.50274426", "0.5014812", "0.50050044", "0.5001785", "0.49982417", "0.49965602", "0.4987348", "0.4987147", "0.49753383", "0.49710608", "0.4955494", "0.49463782", "0.49447247", "0.49387616", "0.49365103", "0.4932719", "0.4930884", "0.49290997", "0.49265394", "0.49239963", "0.4922822", "0.49207994", "0.4920447", "0.49189836", "0.49187276", "0.4918369", "0.49171412", "0.49122605", "0.4904937", "0.48893908", "0.48845422", "0.48834416", "0.48827538", "0.4882623" ]
0.0
-1
Construct the normalized Laplacian matrix.
def normalized_laplacian(degree_vector, weight_matrix, length):
    holders = np.zeros((length, 1))
    holders[:, 0] = 1 / degree_vector
    return np.eye(length) - holders * weight_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def laplacian_matrix(A, normalized=False):\n n, m = A.shape\n D = degree_matrix(A)\n L = D - A\n if normalized:\n degs = _flat(A.sum(axis=1))\n rootD = sps.spdiags(np.power(degs, -1 / 2), [0], n, n, format=\"csr\")\n L = rootD * L * rootD\n return L", "def normalize(self, lam):\n return (lam.T / np.sum(lam, axis=1)).T", "def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))", "def laplacian(W, normalized=True):\n\n # Degree matrix.\n d = W.sum(dim=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D - W\n else:\n # d += np.spacing(np.array(0, W.dtype))\n d = 1 / torch.sqrt(d)\n D = torch.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L", "def build_normalized(self):\n for row, s in enumerate(self.S):\n for col, t in enumerate(self.T):\n\n if self.symmetric and row > col:\n pass\n\n elif self.symmetric and row == col:\n self.normalized_mat[row, col] = 1\n\n else:\n self.normalized_mat[row, col] = self.normalize(row, col)\n\n if self.symmetric:\n self.normalized_mat = self.symmetrize(self.normalized_mat)", "def normalize(self):\n det = self._mat[0][0]*self._mat[1][1] - self._mat[0][1]*self._mat[1][0]\n for i in range(2):\n for j in range(2):\n self._mat[i][j] = (self._mat[i][j])/(np.sqrt(det))", "def laplacian(W, normalized=False):\r\n # Degree matrix.\r\n d = W.sum(axis=0)\r\n # Laplacian matrix.\r\n if not normalized:\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n L = D - W\r\n else:\r\n # d += np.spacing(np.array(0, W.dtype))\r\n d = 1 / np.sqrt(d)\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\r\n L = I - D * W * D\r\n\r\n # assert np.abs(L - L.T).mean() < 1e-9\r\n assert type(L) is scipy.sparse.csr.csr_matrix\r\n return L", "def laplacian(A,normalize=False,randomWalk=False):\n degr = A.sum(axis=1)*1.\n\n if randomWalk:\n degr[degr!=0] = 1./degr[degr!=0]\n Dinv = np.diag(degr)\n return np.eye(A.shape[0]) - np.dot(Dinv,A)\n if normalize:\n degr = A.sum(axis=1)*1.\n degr[degr!=0] = 1./degr[degr!=0]\n Dinv = np.diag(degr)**.5\n return np.eye(A.shape[0]) - np.dot(Dinv,A).dot(Dinv)\n\n return np.diag(degr) - A", "def calculate_normalized_laplacian(adj):\n adj = sp.coo_matrix(adj)\n d = np.array(adj.sum(1))\n d_inv_sqrt = np.power(d, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return normalized_laplacian", "def calculate_normalized_laplacian(adj):\n adj = sp.coo_matrix(adj)\n d = np.array(adj.sum(1))\n d_inv_sqrt = np.power(d, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return normalized_laplacian", "def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix", "def L2_normalize(xx):\r\n\tZx = compute_L2_normalization(xx)\r\n\treturn xx / np.sqrt(Zx[:, np.newaxis])", "def laplacian(points, sigma):\n\n S = similarity_matrix(points, sigma)\n \n (npnts,npnts) = S.shape \n\n D = np.zeros_like(S)\n\n for i in range(npnts):\n #D[i,i] = 1.0 / 
np.sqrt(S[i,:].sum()) \n D[i,i] = S[i,:].sum()\n\n\n return (D - S) #(np.eye(npnts,npnts) - np.dot(D,np.dot(S,D)))", "def lap_mat(self):", "def lu_decomposition(self):\n if self.rows_count() != self.columns_count():\n raise ValueError(\"Matrix needs to me square for LU decomposition.\")\n for i in range(self.rows_count() - 1):\n for j in range(i + 1, self.rows_count()):\n if self[i, i] == 0: # or abs(self[i, i]) <= 0.000001):\n raise ValueError(\"Can't divide by 0\")\n self[j, i] = self[j, i] / self[i, i]\n for k in range(i + 1, self.rows_count()):\n self[j, k] -= self[j, i] * self[i, k]", "def laplacian(A):\n #calculate D by creating a diagonal matrix with the column sum of A\n D = np.diag(A.sum(axis=0))\n return D - A", "def decompose_to_LU(a):\n # create emtpy LU-matrix\n lu_matrix = np.matrix(np.zeros([a.shape[0], a.shape[1]]))\n n = a.shape[0]\n\n for k in range(n):\n # calculate all residual k-row elements\n for j in range(k, n):\n lu_matrix[k, j] = a[k, j] - lu_matrix[k, :k] * lu_matrix[:k, j]\n # calculate all residual k-column elemetns\n for i in range(k + 1, n):\n lu_matrix[i, k] = (a[i, k] - lu_matrix[i, : k] * lu_matrix[: k, k]) / lu_matrix[k, k]\n\n return lu_matrix", "def get_normalization_matrix(x):\n # Input: x 3*N\n #\n # Output: T 3x3 transformation matrix of points\n\n # TODO\n # --------------------------------------------------------------\n # Estimate transformation matrix used to normalize\n # the inputs x\n # --------------------------------------------------------------\n x2d = x[:2, :]\n # Get centroid and mean-distance to centroid\n center = np.mean(x2d, 1, keepdims=True)\n mean_dist = np.mean(np.sqrt(np.sum((x2d - center) ** 2, axis=0)))\n center = center.flatten()\n\n T = np.array([[np.sqrt(2) / mean_dist, 0, -center[0] * np.sqrt(2) / mean_dist],\n [0, np.sqrt(2) / mean_dist, -center[1] * np.sqrt(2) / mean_dist],\n [0, 0, 1]])\n\n return T", "def laplace_matrix(self):\n n = self.number_of_vertices\n laplace_matrix = np.zeros((n, n))\n for i in range(n):\n laplace_matrix[i][i] = 1\n vertice = self.list_of_vertices[i]\n for edge in vertice.edges_list:\n laplace_matrix[i][edge.linked[1].index] = 1\n return laplace_matrix", "def normalize(self, mel_db: np.ndarray) -> np.ndarray:\n mel_norm = ((mel_db - self.ref_level_db) - self.min_level_db) / (\n -self.min_level_db\n )\n if self.symmetric_norm:\n # Symmetric norm\n mel_norm = ((2 * self.max_norm) * mel_norm) - self.max_norm\n if self.clip_norm:\n mel_norm = np.clip(mel_norm, -self.max_norm, self.max_norm)\n else:\n # Asymmetric norm\n mel_norm = self.max_norm * mel_norm\n if self.clip_norm:\n mel_norm = np.clip(mel_norm, 0, self.max_norm)\n\n return mel_norm", "def LU(A):\n m, n = A.shape\n L, U = np.zeros([m, n]), np.zeros([m, n])\n for i in range(n):\n L[i][i] = 1\n\n for i in range(n):\n\n # Upper triangular matrix\n for j in range(i, n):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*U[k][j]\n U[i][j] = A[i][j] - summ\n\n # Lower triangular matrix\n for j in range(i+1, n):\n summ = 0\n for k in range(0, i):\n summ += L[j][k]*U[k][i]\n L[j][i] = (A[j][i] - summ)/U[i][i]\n return L, U", "def laplacian2(A, laplacian_type='raw'):\r\n\r\n N = A.shape[0]\r\n # TODO: Raise exception if A is not square\r\n\r\n degrees = A.sum(1)\r\n # To deal with loops, must extract diagonal part of A\r\n diagw = np.diag(A)\r\n\r\n # w will consist of non-diagonal entries only\r\n ni2, nj2 = A.nonzero()\r\n w2 = A[ni2, nj2]\r\n ndind = (ni2 != nj2).nonzero() # Non-diagonal indices\r\n ni = ni2[ndind]\r\n nj = nj2[ndind]\r\n w = 
w2[ndind]\r\n\r\n di = np.arange(N) # diagonal indices\r\n\r\n if laplacian_type == 'raw':\r\n # non-normalized laplaciand L = D - A\r\n L = np.diag(degrees - diagw)\r\n L[ni, nj] = -w\r\n L = lil_matrix(L)\r\n elif laplacian_type == 'normalized':\r\n # TODO: Implement the normalized laplacian case\r\n # % normalized laplacian D^(-1/2)*(D-A)*D^(-1/2)\r\n # % diagonal entries\r\n # dL=(1-diagw./degrees); % will produce NaN for degrees==0 locations\r\n # dL(degrees==0)=0;% which will be fixed here\r\n # % nondiagonal entries\r\n # ndL=-w./vec( sqrt(degrees(ni).*degrees(nj)) );\r\n # L=sparse([ni;di],[nj;di],[ndL;dL],N,N);\r\n print(\"Not implemented\")\r\n else:\r\n # TODO: Raise an exception\r\n print(\"Don't know what to do\")\r\n\r\n return L", "def getNormLaplacian(W):\n\td=[np.sum(row) for row in W]\n\tD=np.diag(d)\n\tL=D-W\n\t#Dn=D^(-1/2)\n\tDn=np.power(np.linalg.matrix_power(D,-1),0.5)\n\tLbar=np.dot(np.dot(Dn,L),Dn)\n\treturn Lbar", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def normalize(self):\n self.number_of_vectors = self.values.shape[0]\n norm_2 = np.linalg.norm(self.values, axis=1)\n norm_1 = np.sum(self.values_planar, axis=1)\n norm_2 = np.repeat(norm_2, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_1 = np.repeat(norm_1, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_2[norm_2 == 0] = np.finfo(float).eps\n self.values = np.divide(self.values, norm_2)\n self.values_planar = np.divide(self.values_planar, norm_1)", "def normc(A):\r\n # return A / LA.norm(A, axis=0)[None,:]\r\n return A/ np.tile(np.sqrt(np.sum(A*A, axis=0)), (A.shape[0], 1))", "def _assure_normalized(self):\n for iwann in range(self.nwann):\n norm = np.trace(\n self.wannR[:, :, iwann].conj().T @ self.wannR[:, :, iwann])\n #print(f\"Norm {iwann}: {norm}\")", "def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )", "def normalize_l2(x):\n return x / (npla.norm(x))", "def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))", "def laplacian(A):\n D = np.diag(np.sum(A, axis=1))\n return D - A", "def set_lam_nll(self, lam_nll=1.0):\n zero_ary = np.zeros((1,))\n new_lam = zero_ary + lam_nll\n self.lam_nll.set_value(to_fX(new_lam))\n return", "def normalize_data(self):\n\t\tfull_matrix = self.balance_clases()\n\t\ttexture_matrix = Normalizer().fit_transform(X=full_matrix[:,range(0,24)])\n\n\t\treturn texture_matrix", "def symmetrized_normalized_laplacian(degree_vector, weight_matrix, length):\n holders = np.zeros((length, 1))\n holders[:, 0] = np.sqrt(1 / degree_vector)\n\n return np.eye(length) - holders * weight_matrix * holders.T", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def normalize(a):\n a = np.array(a)\n return a / np.linalg.norm(a)", "def normaliza(self):\n return self * (1 / self.norma())", "def norm_with_l2(original_mat):\n normed_mat = np.zeros(original_mat.shape, dtype=np.float32)\n if len(original_mat.shape) == 2:\n for ind_r in 
range(original_mat.shape[0]):\n a = np.square(original_mat[ind_r]*1.0)\n b = np.sum(a)\n c = np.sqrt(b)\n normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / c\n # normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / np.sqrt(np.sum(np.square(original_mat[ind_r])*1.0))\n return normed_mat", "def normalize(data):\n norm_matrix = np.int_(np.log10(data)**2)\n norm_matrix = map(lambda x: x if x < BOARD_SIZE else BOARD_SIZE, norm_matrix)\n norm_matrix = map(lambda x: x if x > 0 else 0, norm_matrix)\n return norm_matrix", "def LUdecomp(Ainput):\n\n n, m = np.shape(Ainput)\n \n if n != m:\n return 'Error: Please enter an invertible matrix.'\n \n U = Ainput.copy() # make copies so as not to write over originals\n L = np.zeros((np.shape(Ainput)))\n \n for i in range(0,n):\n L[i,i] = 1\n for i in range(0,n-1): # loop over pivot rows from row 1 to row n-1 (i to n-2)\n for j in range(i+1,n): # loop over row to be zero'ed from row j+1 to n (j+1 to n-1)\n c = U[j,i]/U[i,i] # multiplicative factor to zero point\n L[j,i] = c\n U[j,i] = 0.0 # we know this element goes to zero\n U[j,i+1:n]=U[j,i+1:n]-c*U[i,i+1:n] # do subtraction of two rows\n\n return (L,U) # return lower and upper decompositions", "def normalize(self):\r\n\r\n nlen = 1.0/math.sqrt(self*self)\r\n return vec4(self.x*nlen, self.y*nlen, self.z*nlen, self.w*nlen)", "def l1_normalize(x: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name\n return x / x.sum()", "def _set_ls_matrices(self):\n zz_t = self.z_matrix * self.z_matrix.transpose()\n l, s, l_t = np.linalg.svd(zz_t)\n s[self.p:] = 0\n self.l_matrix = np.matrix(l)\n self.s_matirx = np.matrix(np.diag(s))", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)", "def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))", "def normalized(self):\n L = self.length\n if L > pygonal.EPSILON:\n v = tuple.__new__(Vec2, (self[0] / L, self[1] / L))\n v.__dict__['length'] = v.__dict__['length2'] = 1.0\n return v\n else:\n return null", "def project_Lnuc_ball(X: \"fasta.linalg.Matrix\", t: float) -> \"fasta.linalg.Matrix\":\n U, s, V = la.svd(X)\n\n # Construct the diagonal matrix of singular values, S, as a shrunken version of the original signal values\n S = np.zeros(X.shape)\n S[:len(s),:len(s)] = np.diag(shrink(s, t))\n return U @ S @ V", "def normalized(self):\n return self.from_points(\n vector.normalized() for vector in self._vectors)", "def _get_normalized_adj_matrix(self):\n\n # find all of the unique layers in the problem (first index of category tuples)\n row_sums = self._frequency_matrix.sum(axis=1)\n normed_matrix = self._frequency_matrix / row_sums[:, np.newaxis]\n\n return normed_matrix", "def compute_L2_normalization(xx):\r\n\treturn np.sum(xx ** 2, axis=1)", "def normalize(inp):\n\n out = inp / np.linalg.norm(inp, axis=1, keepdims=True)\n\n return out", "def cal_L(self):\n # calculate the l matrix\n self.point_matrixs = self.point_matrix.reshape(\n self.point_matrix.shape[0], 1, self.point_matrix.shape[1])\n self.point_matrixs = np.tile(self.point_matrixs,\n (self.attach_points.shape[0], 1))\n self.attach_points_matrix = np.matlib.repmat(\n 
self.attach_points[:, 0:3], self.point_matrix.shape[0], 1)\n self.attach_points_matrix = self.attach_points_matrix.reshape(\n self.point_matrix.shape[0], self.attach_points.shape[0], 3)\n self.L = np.subtract(self.attach_points_matrix,\n self.point_matrixs)\n # self.L[:,self.attach_points[:,3]==1,:] = \\\n # - self.L[:,self.attach_points[:,3]==1,:]\n # print(self.L)", "def normalize_distancematrix(self):\n INF = self.distmat.max().max()\n df = self.distmat.fillna(INF)\n self.distmat = (df - df.min()) / (df.max() - df.min())", "def mult_L(self) -> np.ndarray:\n return np.array([\n [self.w, -self.x, -self.y, -self.z],\n [self.x, self.w, -self.z, self.y],\n [self.y, self.z, self.w, -self.x],\n [self.z, -self.y, self.x, self.w]])", "def normalize_matrix(mat):\n return (mat + abs(mat.min())) / (mat.max() - mat.min())", "def prox_l1_norm(w, lamb):\n\treturn np.sign(w) * np.maximum( np.abs(w) - lamb, 0)", "def normalize_lorentz_1d(\n lorentz, old_f_scale, old_v_scale, new_f_scale=(\n 0, 1, 1024), new_v_scale=(\n 0, 1, 1024)):\n A0 = lorentz[0]\n f0 = lorentz[1]\n G0 = lorentz[2]\n A1 = normalize_0d(A0 + old_v_scale[0], old_v_scale, new_v_scale)\n f1 = normalize_0d(f0, old_f_scale, new_f_scale)\n G1 = normalize_0d(G0 + old_f_scale[0], old_f_scale, new_f_scale)\n return np.array([A1, f1, G1, lorentz[3]])", "def test_normalize_matrix(self):\n input_matrix = [\n [0, 1.0],\n [1.0, 1.0]\n ]\n\n expected = [\n [0, 1],\n [0.5, 0.5]\n ]\n\n result = self.summarizer.normalize_matrix(input_matrix)\n\n self.assertEqual(expected, result)", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n # using l2 norm to normalize\n x = x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))\n ### END YOUR CODE\n\n return x", "def laplacian_(self, grid, i, j):\n l1 = grid[(i+1+self.N) % self.N][j] + grid[(i-1+self.N) % self.N][j]\n l2 = grid[i][(j+1+self.N) % self.N] + grid[i][(j-1+self.N) % self.N]\n l3 = -4*grid[i][j]\n return (l1 + l2 + l3)/self.dx**2", "def normalize(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"normalization_matrix\"):\n self.load_normalization_data()\n self.normalization_calculation()", "def local_to_normalized(npboxes: np.ndarray, window: Box):\n height, width = window.size\n return npboxes / np.array([[height, width, height, width]])", "def laplacian_1d(window_size) -> torch.Tensor:\n\n filter_1d = torch.ones(window_size)\n filter_1d[window_size // 2] = 1 - window_size\n laplacian_1d: torch.Tensor = filter_1d\n return laplacian_1d", "def lMatrix(a, b, c, alpha, beta, gamma, convention=None):\n lMatrix = np.zeros((3, 3))\n\n cosAlpha = np.cos(alpha)\n cosBeta = np.cos(beta)\n cosGamma = np.cos(gamma)\n\n sinGamma = np.sin(gamma)\n\n lMatrix[0, 0] = a\n lMatrix[0, 1] = b * cosGamma\n lMatrix[0, 2] = c * cosBeta\n\n lMatrix[1, 1] = b * sinGamma\n lMatrix[1, 2] = c * (cosAlpha - cosBeta * cosGamma) / sinGamma\n\n lMatrix[2, 2] = c * np.sqrt(\n 1 + 2 * cosAlpha * cosBeta * cosGamma -\n cosAlpha**2 - cosBeta**2 - cosGamma**2\n ) / sinGamma\n\n # OI/HKL convention - x // [10-10], y // a2 [-12-10]\n # TSL convention - x // a1 [2-1-10], y // [01-10]\n if convention is None:\n convention = defaults['crystal_ortho_conv']\n\n if convention.lower() in ['hkl', 'oi']:\n # Swap 00 with 11 and 01 with 10 due to how OI orthonormalises\n # From Brad Wynne\n t1 = lMatrix[0, 0]\n t2 = lMatrix[1, 0]\n\n lMatrix[0, 0] = lMatrix[1, 1]\n lMatrix[1, 0] = lMatrix[0, 1]\n\n lMatrix[1, 1] = t1\n lMatrix[0, 1] = t2\n\n elif convention.lower() != 'tsl':\n raise 
ValueError(\n f\"Unknown convention '{convention}' for orthonormalisation of \"\n f\"crystal structure, can be 'hkl' or 'tsl'\"\n )\n\n # Set small components to 0\n lMatrix[np.abs(lMatrix) < 1e-10] = 0\n\n return lMatrix", "def normalize_face_landmarks(face_landmarks):\r\n\tface_landmarks_norm = np.zeros(face_landmarks.shape)\r\n\t\r\n\tfor (i, lm) in enumerate(face_landmarks):\r\n\t\tface_landmarks_norm[i] = lm - lm[nose_center_idx]\r\n\t\t\t\r\n\tstd_x = np.std(face_landmarks_norm[:,:,0].reshape((-1,)))\r\n\tstd_y = np.std(face_landmarks_norm[:,:,1].reshape((-1,)))\r\n\t\r\n\tface_landmarks_norm[:,:,0] = np.multiply(face_landmarks_norm[:,:,0], 1./std_x)\r\n\tface_landmarks_norm[:,:,1] = np.multiply(face_landmarks_norm[:,:,1], 1./std_y)\r\n\t\r\n\treturn face_landmarks_norm", "def MillerNormalVectors_111():\r\n k_111 = np.array(\r\n [[1, 1, 1], [-1, 1, 1], [1, -1, 1], [1, 1, -1]])/np.sqrt(3.0)\r\n return np.array([k_111[0, :], k_111[1, :], k_111[2, :], k_111[3, :],\r\n -k_111[0, :], -k_111[1, :], -k_111[2, :], -k_111[3, :]])", "def prepare_laplacian(laplacian):\n\n def estimate_lmax(laplacian, tol=5e-3):\n r\"\"\"Estimate the largest eigenvalue of an operator.\"\"\"\n lmax = sparse.linalg.eigsh(laplacian, k=1, tol=tol,\n ncv=min(laplacian.shape[0], 10),\n return_eigenvectors=False)\n lmax = lmax[0]\n lmax *= 1 + 2 * tol # Be robust to errors.\n return lmax\n\n def scale_operator(L, lmax, scale=1):\n r\"\"\"Scale the eigenvalues from [0, lmax] to [-scale, scale].\"\"\"\n I = sparse.identity(L.shape[0], format=L.format, dtype=L.dtype)\n L *= 2 * scale / lmax\n L -= I\n return L\n\n lmax = estimate_lmax(laplacian)\n laplacian = scale_operator(laplacian, lmax)\n\n laplacian = sparse.coo_matrix(laplacian)\n\n # PyTorch wants a LongTensor (int64) as indices (it'll otherwise convert).\n indices = np.empty((2, laplacian.nnz), dtype=np.int64)\n np.stack((laplacian.row, laplacian.col), axis=0, out=indices)\n indices = torch.from_numpy(indices)\n\n laplacian = torch.sparse_coo_tensor(indices, laplacian.data, laplacian.shape)\n laplacian = laplacian.coalesce() # More efficient subsequent operations.\n return laplacian", "def normalized(self):\n len = self.length\n return Vector(self.x / len, self.y / len)", "def _normalize(X: np.ndarray) -> np.ndarray:\n # return X * np.sqrt(1 / np.sum(X ** 2, axis=1))[:, None]\n return X * np.sqrt(X.shape[1] / np.sum(X ** 2, axis=1))[:, None]", "def _prepare_outer_matrix(self):\n self._mat_plane = numpy.array([\n self._scaling[0], 0, 0, 0,\n 0, self._scaling[1], 0, 0,\n 0, 0, 1, 0,\n self.i_border[0], -self.i_border[1], 0, 1\n ], dtype=numpy.float32)", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def _normalize(a: np.ndarray, u: float=0, s: float=1) -> np.ndarray:\n a_norm = (a - np.mean(a)) / (np.std(a) + STABILITY)\n a_rescaled = a_norm * s + u\n\n return a_rescaled", "def rownorm(a):\n return sum(array(a)**2,axis=1)**.5", "def normalize(self):\n\n if not self.magnitude():\n return Vector(0, 0)\n\n l = 1 / self.magnitude()\n return self.scale(l)", "def get_norma(self):\n return self.norma", "def normalized(self):\n length = self.length\n if length != 0:\n return self/length\n return Vec2d(self)", "def normalisation_l_inf(x):\n res = np.zeros(x.shape)\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n res[i,j] = x[i,j]/(np.max(x[i,j])+1e-5)\n return(res)", "def normalize(self):\n self.vector /= 
np.linalg.norm(self.vector)", "def normalize_matrix(matrix, min_val, max_val):\n return (max_val - min_val) * (matrix - np.min(matrix)) / (np.max(matrix) - np.min(matrix)) + min_val", "def normalize(input_matrix):\n\n row_sums = input_matrix.sum(axis=1)\n try:\n assert (np.count_nonzero(row_sums)==np.shape(row_sums)[0]) # no row should sum to zero\n except Exception:\n raise Exception(\"Error while normalizing. Row(s) sum to zero\")\n new_matrix = input_matrix / row_sums[:, np.newaxis]\n return new_matrix", "def L1Norm(X):\n return max(np.sum(X,axis=0))", "def test_LU(self):\n A = np.random.rand(10, 10)\n MA = to_matrix(A)\n ML, MU = MA.decomposeLU()\n self.assertEqual(ML*MU, MA)\n self.assertTrue(ML.is_lower_triangular())\n self.assertTrue(MU.is_upper_triangular())", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalization_calculation(self) -> None:\n self.normalized_inventory = (\n self.normalization_matrix * self.characterized_inventory\n )", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def normalize(x):\n\n x_norm = np.linalg.norm(x, axis=1, keepdims=True)\n print(x_norm)\n x = x / x_norm\n ### END\n\n return x", "def norm(self):", "def normalize(self):\n self.length = np.ones(self.nV)\n return self", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n norm2 = np.linalg.norm(x,2,axis = 1).reshape(x.shape[0],-1)\n x = x/norm2\n ### END YOUR CODE\n\n return x", "def lap_normalize(img, scale_n=4):\n # img = tf.expand_dims(img, 0)\n # print(\"Inside lap_normalize Function, img shape {}\".format(tf.shape(img)))\n\n tlevels = lap_split_n(img, scale_n)\n tlevels = list(map(normalize_std, tlevels))\n\n out = lap_merge(tlevels)\n\n return out[0, :, :, :]", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def unnormalize(self, inputs: Tensor) -> Tensor:\n # The normalizing constants are applied to the entire row, so add dummy outputs to the\n # inputs to make a row.\n row = torch.zeros_like(self._row_range)\n row[self._input_column_indices] = inputs\n row *= self._row_range\n row += self._row_min\n return row[self._input_column_indices]", "def lup_decomposition(self):\n p = [i for i in range(self.rows_count())]\n for i in range(self.rows_count() - 1):\n pivot = i\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[j], i]) > abs(self[p[pivot], i]):\n pivot = j\n p[pivot], p[i] = p[i], p[pivot]\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[i], i]) < math.pow(10, -6):\n raise ValueError(\"Can't divide by 0\")\n self[p[j], i] /= self[p[i], i]\n for k in range(i + 1, self.rows_count()):\n self[p[j], k] -= self[p[j], i] * self[p[i], k]\n lst = []\n for i in p:\n lst.append(self.value[i])\n return p, Matrix(lst)", "def norm(self):\r\n old_origin = np.array(self.origin)\r\n self.origin = [0, 0, 0]\r\n old_origin[0] = old_origin[0] / self.x[0]\r\n old_origin[1] = old_origin[1] / self.y[1]\r\n old_origin[2] = old_origin[2] / self.z[2]\r\n self.data = ndimage.shift(self.data, -old_origin, mode='wrap')", "def normalize(self):\n self.desc += \", normalize\"\n self._vecs /= np.linalg.norm(self._vecs, axis=1)[:, np.newaxis]\n self.reindex()", "def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already 
computed\r\n return self.laplace_op*array_in", "def nll(self, data):\n nll = np.zeros(data.shape[0])\n self.a[0] = self.c\n for i in range(self.Ndim):\n a = self.rhos[i] * self.a[i]\n h = 0.5 * (a + np.abs(a)) # ReLU\n za = np.dot(self.Vs['alpha'][i].T, h) + self.bs['alpha'][i]\n zm = np.dot(self.Vs['mu'][i].T, h) + self.bs['mu'][i]\n zs = np.dot(self.Vs['sigma'][i].T, h) + self.bs['sigma'][i]\n self.alphas[i] = softmax(za)\n self.mus[i] = zm\n self.sigmas[i] = np.exp(zs)\n self.vars = self.sigmas ** 2.\n nll += nll_MOG_1D(data[:, i], self.alphas[i], self.mus[i],\n self.vars[i])[0]\n return nll", "def normalize(self):\n self._vectors = [vector.normalized() for vector in self._vectors]", "def normalize_lorentz_2d(\n lorentz, old_f_scale, old_v_scale, new_f_scale=(\n 0, 1, 1024), new_v_scale=(\n 0, 1, 1024)):\n lorentz_array = np.empty((0, 4))\n for i in range(0, lorentz.shape[0]):\n l0 = lorentz[i]\n l1 = normalize_lorentz_1d(\n l0, old_f_scale, old_v_scale, new_f_scale, new_v_scale)\n lorentz_array = np.append(lorentz_array, np.array([l1]), axis=0)\n return lorentz_array", "def normalise(a):\n return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))" ]
[ "0.6907984", "0.6711874", "0.62322444", "0.6217343", "0.620613", "0.6196674", "0.6158735", "0.6132494", "0.61195916", "0.61195916", "0.6036492", "0.594732", "0.59065795", "0.5906229", "0.58959603", "0.5889874", "0.5884998", "0.58620787", "0.5861111", "0.5846933", "0.58036464", "0.5801993", "0.5788728", "0.577097", "0.5767137", "0.57450557", "0.573835", "0.5735539", "0.5723342", "0.56960505", "0.5691527", "0.5675007", "0.5665385", "0.5650528", "0.5626447", "0.5625727", "0.5621359", "0.5615229", "0.56072205", "0.56044185", "0.5593428", "0.55729896", "0.5563359", "0.55265945", "0.55002975", "0.54988915", "0.5484774", "0.5480021", "0.5475302", "0.5464984", "0.546461", "0.54483265", "0.5446641", "0.54449135", "0.5430567", "0.5427673", "0.5417368", "0.5412706", "0.541223", "0.54117167", "0.5411509", "0.54114646", "0.54078287", "0.5398319", "0.53982574", "0.5393246", "0.5390396", "0.5373508", "0.53556174", "0.53553236", "0.53543246", "0.5349406", "0.5349223", "0.5345089", "0.5344909", "0.534365", "0.53436196", "0.5342364", "0.53417313", "0.534133", "0.5335337", "0.53315496", "0.5324727", "0.5322699", "0.5321293", "0.53199786", "0.53199357", "0.53152794", "0.5314903", "0.5311885", "0.5302912", "0.5301873", "0.53007627", "0.52982205", "0.52981406", "0.5289843", "0.52867776", "0.52819157", "0.5280653", "0.5270947" ]
0.6199283
5
Construct the normalized laplacian matrix.
def symmetrized_normalized_laplacian(degree_vector, weight_matrix, length): holders = np.zeros((length, 1)) holders[:, 0] = np.sqrt(1 / degree_vector) return np.eye(length) - holders * weight_matrix * holders.T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def laplacian_matrix(A, normalized=False):\n n, m = A.shape\n D = degree_matrix(A)\n L = D - A\n if normalized:\n degs = _flat(A.sum(axis=1))\n rootD = sps.spdiags(np.power(degs, -1 / 2), [0], n, n, format=\"csr\")\n L = rootD * L * rootD\n return L", "def normalize(self, lam):\n return (lam.T / np.sum(lam, axis=1)).T", "def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))", "def laplacian(W, normalized=True):\n\n # Degree matrix.\n d = W.sum(dim=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D - W\n else:\n # d += np.spacing(np.array(0, W.dtype))\n d = 1 / torch.sqrt(d)\n D = torch.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L", "def build_normalized(self):\n for row, s in enumerate(self.S):\n for col, t in enumerate(self.T):\n\n if self.symmetric and row > col:\n pass\n\n elif self.symmetric and row == col:\n self.normalized_mat[row, col] = 1\n\n else:\n self.normalized_mat[row, col] = self.normalize(row, col)\n\n if self.symmetric:\n self.normalized_mat = self.symmetrize(self.normalized_mat)", "def normalize(self):\n det = self._mat[0][0]*self._mat[1][1] - self._mat[0][1]*self._mat[1][0]\n for i in range(2):\n for j in range(2):\n self._mat[i][j] = (self._mat[i][j])/(np.sqrt(det))", "def normalized_laplacian(degree_vector, weight_matrix, length):\n holders = np.zeros((length, 1))\n holders[:, 0] = 1 / degree_vector\n\n return np.eye(length) - holders * weight_matrix", "def laplacian(W, normalized=False):\r\n # Degree matrix.\r\n d = W.sum(axis=0)\r\n # Laplacian matrix.\r\n if not normalized:\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n L = D - W\r\n else:\r\n # d += np.spacing(np.array(0, W.dtype))\r\n d = 1 / np.sqrt(d)\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\r\n L = I - D * W * D\r\n\r\n # assert np.abs(L - L.T).mean() < 1e-9\r\n assert type(L) is scipy.sparse.csr.csr_matrix\r\n return L", "def laplacian(A,normalize=False,randomWalk=False):\n degr = A.sum(axis=1)*1.\n\n if randomWalk:\n degr[degr!=0] = 1./degr[degr!=0]\n Dinv = np.diag(degr)\n return np.eye(A.shape[0]) - np.dot(Dinv,A)\n if normalize:\n degr = A.sum(axis=1)*1.\n degr[degr!=0] = 1./degr[degr!=0]\n Dinv = np.diag(degr)**.5\n return np.eye(A.shape[0]) - np.dot(Dinv,A).dot(Dinv)\n\n return np.diag(degr) - A", "def calculate_normalized_laplacian(adj):\n adj = sp.coo_matrix(adj)\n d = np.array(adj.sum(1))\n d_inv_sqrt = np.power(d, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return normalized_laplacian", "def calculate_normalized_laplacian(adj):\n adj = sp.coo_matrix(adj)\n d = np.array(adj.sum(1))\n d_inv_sqrt = np.power(d, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return normalized_laplacian", "def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix", "def L2_normalize(xx):\r\n\tZx = compute_L2_normalization(xx)\r\n\treturn xx / 
np.sqrt(Zx[:, np.newaxis])", "def laplacian(points, sigma):\n\n S = similarity_matrix(points, sigma)\n \n (npnts,npnts) = S.shape \n\n D = np.zeros_like(S)\n\n for i in range(npnts):\n #D[i,i] = 1.0 / np.sqrt(S[i,:].sum()) \n D[i,i] = S[i,:].sum()\n\n\n return (D - S) #(np.eye(npnts,npnts) - np.dot(D,np.dot(S,D)))", "def lap_mat(self):", "def lu_decomposition(self):\n if self.rows_count() != self.columns_count():\n raise ValueError(\"Matrix needs to me square for LU decomposition.\")\n for i in range(self.rows_count() - 1):\n for j in range(i + 1, self.rows_count()):\n if self[i, i] == 0: # or abs(self[i, i]) <= 0.000001):\n raise ValueError(\"Can't divide by 0\")\n self[j, i] = self[j, i] / self[i, i]\n for k in range(i + 1, self.rows_count()):\n self[j, k] -= self[j, i] * self[i, k]", "def laplacian(A):\n #calculate D by creating a diagonal matrix with the column sum of A\n D = np.diag(A.sum(axis=0))\n return D - A", "def decompose_to_LU(a):\n # create emtpy LU-matrix\n lu_matrix = np.matrix(np.zeros([a.shape[0], a.shape[1]]))\n n = a.shape[0]\n\n for k in range(n):\n # calculate all residual k-row elements\n for j in range(k, n):\n lu_matrix[k, j] = a[k, j] - lu_matrix[k, :k] * lu_matrix[:k, j]\n # calculate all residual k-column elemetns\n for i in range(k + 1, n):\n lu_matrix[i, k] = (a[i, k] - lu_matrix[i, : k] * lu_matrix[: k, k]) / lu_matrix[k, k]\n\n return lu_matrix", "def get_normalization_matrix(x):\n # Input: x 3*N\n #\n # Output: T 3x3 transformation matrix of points\n\n # TODO\n # --------------------------------------------------------------\n # Estimate transformation matrix used to normalize\n # the inputs x\n # --------------------------------------------------------------\n x2d = x[:2, :]\n # Get centroid and mean-distance to centroid\n center = np.mean(x2d, 1, keepdims=True)\n mean_dist = np.mean(np.sqrt(np.sum((x2d - center) ** 2, axis=0)))\n center = center.flatten()\n\n T = np.array([[np.sqrt(2) / mean_dist, 0, -center[0] * np.sqrt(2) / mean_dist],\n [0, np.sqrt(2) / mean_dist, -center[1] * np.sqrt(2) / mean_dist],\n [0, 0, 1]])\n\n return T", "def laplace_matrix(self):\n n = self.number_of_vertices\n laplace_matrix = np.zeros((n, n))\n for i in range(n):\n laplace_matrix[i][i] = 1\n vertice = self.list_of_vertices[i]\n for edge in vertice.edges_list:\n laplace_matrix[i][edge.linked[1].index] = 1\n return laplace_matrix", "def normalize(self, mel_db: np.ndarray) -> np.ndarray:\n mel_norm = ((mel_db - self.ref_level_db) - self.min_level_db) / (\n -self.min_level_db\n )\n if self.symmetric_norm:\n # Symmetric norm\n mel_norm = ((2 * self.max_norm) * mel_norm) - self.max_norm\n if self.clip_norm:\n mel_norm = np.clip(mel_norm, -self.max_norm, self.max_norm)\n else:\n # Asymmetric norm\n mel_norm = self.max_norm * mel_norm\n if self.clip_norm:\n mel_norm = np.clip(mel_norm, 0, self.max_norm)\n\n return mel_norm", "def LU(A):\n m, n = A.shape\n L, U = np.zeros([m, n]), np.zeros([m, n])\n for i in range(n):\n L[i][i] = 1\n\n for i in range(n):\n\n # Upper triangular matrix\n for j in range(i, n):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*U[k][j]\n U[i][j] = A[i][j] - summ\n\n # Lower triangular matrix\n for j in range(i+1, n):\n summ = 0\n for k in range(0, i):\n summ += L[j][k]*U[k][i]\n L[j][i] = (A[j][i] - summ)/U[i][i]\n return L, U", "def laplacian2(A, laplacian_type='raw'):\r\n\r\n N = A.shape[0]\r\n # TODO: Raise exception if A is not square\r\n\r\n degrees = A.sum(1)\r\n # To deal with loops, must extract diagonal part of A\r\n diagw = 
np.diag(A)\r\n\r\n # w will consist of non-diagonal entries only\r\n ni2, nj2 = A.nonzero()\r\n w2 = A[ni2, nj2]\r\n ndind = (ni2 != nj2).nonzero() # Non-diagonal indices\r\n ni = ni2[ndind]\r\n nj = nj2[ndind]\r\n w = w2[ndind]\r\n\r\n di = np.arange(N) # diagonal indices\r\n\r\n if laplacian_type == 'raw':\r\n # non-normalized laplaciand L = D - A\r\n L = np.diag(degrees - diagw)\r\n L[ni, nj] = -w\r\n L = lil_matrix(L)\r\n elif laplacian_type == 'normalized':\r\n # TODO: Implement the normalized laplacian case\r\n # % normalized laplacian D^(-1/2)*(D-A)*D^(-1/2)\r\n # % diagonal entries\r\n # dL=(1-diagw./degrees); % will produce NaN for degrees==0 locations\r\n # dL(degrees==0)=0;% which will be fixed here\r\n # % nondiagonal entries\r\n # ndL=-w./vec( sqrt(degrees(ni).*degrees(nj)) );\r\n # L=sparse([ni;di],[nj;di],[ndL;dL],N,N);\r\n print(\"Not implemented\")\r\n else:\r\n # TODO: Raise an exception\r\n print(\"Don't know what to do\")\r\n\r\n return L", "def getNormLaplacian(W):\n\td=[np.sum(row) for row in W]\n\tD=np.diag(d)\n\tL=D-W\n\t#Dn=D^(-1/2)\n\tDn=np.power(np.linalg.matrix_power(D,-1),0.5)\n\tLbar=np.dot(np.dot(Dn,L),Dn)\n\treturn Lbar", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def normalize(self):\n self.number_of_vectors = self.values.shape[0]\n norm_2 = np.linalg.norm(self.values, axis=1)\n norm_1 = np.sum(self.values_planar, axis=1)\n norm_2 = np.repeat(norm_2, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_1 = np.repeat(norm_1, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_2[norm_2 == 0] = np.finfo(float).eps\n self.values = np.divide(self.values, norm_2)\n self.values_planar = np.divide(self.values_planar, norm_1)", "def normc(A):\r\n # return A / LA.norm(A, axis=0)[None,:]\r\n return A/ np.tile(np.sqrt(np.sum(A*A, axis=0)), (A.shape[0], 1))", "def _assure_normalized(self):\n for iwann in range(self.nwann):\n norm = np.trace(\n self.wannR[:, :, iwann].conj().T @ self.wannR[:, :, iwann])\n #print(f\"Norm {iwann}: {norm}\")", "def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )", "def normalize_l2(x):\n return x / (npla.norm(x))", "def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))", "def laplacian(A):\n D = np.diag(np.sum(A, axis=1))\n return D - A", "def set_lam_nll(self, lam_nll=1.0):\n zero_ary = np.zeros((1,))\n new_lam = zero_ary + lam_nll\n self.lam_nll.set_value(to_fX(new_lam))\n return", "def normalize_data(self):\n\t\tfull_matrix = self.balance_clases()\n\t\ttexture_matrix = Normalizer().fit_transform(X=full_matrix[:,range(0,24)])\n\n\t\treturn texture_matrix", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def normalize(a):\n a = np.array(a)\n return a / np.linalg.norm(a)", "def normaliza(self):\n return self * (1 / self.norma())", "def norm_with_l2(original_mat):\n normed_mat = np.zeros(original_mat.shape, dtype=np.float32)\n if len(original_mat.shape) == 2:\n for ind_r in range(original_mat.shape[0]):\n 
a = np.square(original_mat[ind_r]*1.0)\n b = np.sum(a)\n c = np.sqrt(b)\n normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / c\n # normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / np.sqrt(np.sum(np.square(original_mat[ind_r])*1.0))\n return normed_mat", "def normalize(data):\n norm_matrix = np.int_(np.log10(data)**2)\n norm_matrix = map(lambda x: x if x < BOARD_SIZE else BOARD_SIZE, norm_matrix)\n norm_matrix = map(lambda x: x if x > 0 else 0, norm_matrix)\n return norm_matrix", "def LUdecomp(Ainput):\n\n n, m = np.shape(Ainput)\n \n if n != m:\n return 'Error: Please enter an invertible matrix.'\n \n U = Ainput.copy() # make copies so as not to write over originals\n L = np.zeros((np.shape(Ainput)))\n \n for i in range(0,n):\n L[i,i] = 1\n for i in range(0,n-1): # loop over pivot rows from row 1 to row n-1 (i to n-2)\n for j in range(i+1,n): # loop over row to be zero'ed from row j+1 to n (j+1 to n-1)\n c = U[j,i]/U[i,i] # multiplicative factor to zero point\n L[j,i] = c\n U[j,i] = 0.0 # we know this element goes to zero\n U[j,i+1:n]=U[j,i+1:n]-c*U[i,i+1:n] # do subtraction of two rows\n\n return (L,U) # return lower and upper decompositions", "def normalize(self):\r\n\r\n nlen = 1.0/math.sqrt(self*self)\r\n return vec4(self.x*nlen, self.y*nlen, self.z*nlen, self.w*nlen)", "def l1_normalize(x: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name\n return x / x.sum()", "def _set_ls_matrices(self):\n zz_t = self.z_matrix * self.z_matrix.transpose()\n l, s, l_t = np.linalg.svd(zz_t)\n s[self.p:] = 0\n self.l_matrix = np.matrix(l)\n self.s_matirx = np.matrix(np.diag(s))", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)", "def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))", "def normalized(self):\n L = self.length\n if L > pygonal.EPSILON:\n v = tuple.__new__(Vec2, (self[0] / L, self[1] / L))\n v.__dict__['length'] = v.__dict__['length2'] = 1.0\n return v\n else:\n return null", "def project_Lnuc_ball(X: \"fasta.linalg.Matrix\", t: float) -> \"fasta.linalg.Matrix\":\n U, s, V = la.svd(X)\n\n # Construct the diagonal matrix of singular values, S, as a shrunken version of the original signal values\n S = np.zeros(X.shape)\n S[:len(s),:len(s)] = np.diag(shrink(s, t))\n return U @ S @ V", "def normalized(self):\n return self.from_points(\n vector.normalized() for vector in self._vectors)", "def _get_normalized_adj_matrix(self):\n\n # find all of the unique layers in the problem (first index of category tuples)\n row_sums = self._frequency_matrix.sum(axis=1)\n normed_matrix = self._frequency_matrix / row_sums[:, np.newaxis]\n\n return normed_matrix", "def normalize(inp):\n\n out = inp / np.linalg.norm(inp, axis=1, keepdims=True)\n\n return out", "def compute_L2_normalization(xx):\r\n\treturn np.sum(xx ** 2, axis=1)", "def normalize_distancematrix(self):\n INF = self.distmat.max().max()\n df = self.distmat.fillna(INF)\n self.distmat = (df - df.min()) / (df.max() - df.min())", "def cal_L(self):\n # calculate the l matrix\n self.point_matrixs = self.point_matrix.reshape(\n self.point_matrix.shape[0], 1, self.point_matrix.shape[1])\n self.point_matrixs = 
np.tile(self.point_matrixs,\n (self.attach_points.shape[0], 1))\n self.attach_points_matrix = np.matlib.repmat(\n self.attach_points[:, 0:3], self.point_matrix.shape[0], 1)\n self.attach_points_matrix = self.attach_points_matrix.reshape(\n self.point_matrix.shape[0], self.attach_points.shape[0], 3)\n self.L = np.subtract(self.attach_points_matrix,\n self.point_matrixs)\n # self.L[:,self.attach_points[:,3]==1,:] = \\\n # - self.L[:,self.attach_points[:,3]==1,:]\n # print(self.L)", "def mult_L(self) -> np.ndarray:\n return np.array([\n [self.w, -self.x, -self.y, -self.z],\n [self.x, self.w, -self.z, self.y],\n [self.y, self.z, self.w, -self.x],\n [self.z, -self.y, self.x, self.w]])", "def normalize_matrix(mat):\n return (mat + abs(mat.min())) / (mat.max() - mat.min())", "def prox_l1_norm(w, lamb):\n\treturn np.sign(w) * np.maximum( np.abs(w) - lamb, 0)", "def normalize_lorentz_1d(\n lorentz, old_f_scale, old_v_scale, new_f_scale=(\n 0, 1, 1024), new_v_scale=(\n 0, 1, 1024)):\n A0 = lorentz[0]\n f0 = lorentz[1]\n G0 = lorentz[2]\n A1 = normalize_0d(A0 + old_v_scale[0], old_v_scale, new_v_scale)\n f1 = normalize_0d(f0, old_f_scale, new_f_scale)\n G1 = normalize_0d(G0 + old_f_scale[0], old_f_scale, new_f_scale)\n return np.array([A1, f1, G1, lorentz[3]])", "def test_normalize_matrix(self):\n input_matrix = [\n [0, 1.0],\n [1.0, 1.0]\n ]\n\n expected = [\n [0, 1],\n [0.5, 0.5]\n ]\n\n result = self.summarizer.normalize_matrix(input_matrix)\n\n self.assertEqual(expected, result)", "def normalize(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"normalization_matrix\"):\n self.load_normalization_data()\n self.normalization_calculation()", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n # using l2 norm to normalize\n x = x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))\n ### END YOUR CODE\n\n return x", "def local_to_normalized(npboxes: np.ndarray, window: Box):\n height, width = window.size\n return npboxes / np.array([[height, width, height, width]])", "def laplacian_(self, grid, i, j):\n l1 = grid[(i+1+self.N) % self.N][j] + grid[(i-1+self.N) % self.N][j]\n l2 = grid[i][(j+1+self.N) % self.N] + grid[i][(j-1+self.N) % self.N]\n l3 = -4*grid[i][j]\n return (l1 + l2 + l3)/self.dx**2", "def laplacian_1d(window_size) -> torch.Tensor:\n\n filter_1d = torch.ones(window_size)\n filter_1d[window_size // 2] = 1 - window_size\n laplacian_1d: torch.Tensor = filter_1d\n return laplacian_1d", "def normalize_face_landmarks(face_landmarks):\r\n\tface_landmarks_norm = np.zeros(face_landmarks.shape)\r\n\t\r\n\tfor (i, lm) in enumerate(face_landmarks):\r\n\t\tface_landmarks_norm[i] = lm - lm[nose_center_idx]\r\n\t\t\t\r\n\tstd_x = np.std(face_landmarks_norm[:,:,0].reshape((-1,)))\r\n\tstd_y = np.std(face_landmarks_norm[:,:,1].reshape((-1,)))\r\n\t\r\n\tface_landmarks_norm[:,:,0] = np.multiply(face_landmarks_norm[:,:,0], 1./std_x)\r\n\tface_landmarks_norm[:,:,1] = np.multiply(face_landmarks_norm[:,:,1], 1./std_y)\r\n\t\r\n\treturn face_landmarks_norm", "def lMatrix(a, b, c, alpha, beta, gamma, convention=None):\n lMatrix = np.zeros((3, 3))\n\n cosAlpha = np.cos(alpha)\n cosBeta = np.cos(beta)\n cosGamma = np.cos(gamma)\n\n sinGamma = np.sin(gamma)\n\n lMatrix[0, 0] = a\n lMatrix[0, 1] = b * cosGamma\n lMatrix[0, 2] = c * cosBeta\n\n lMatrix[1, 1] = b * sinGamma\n lMatrix[1, 2] = c * (cosAlpha - cosBeta * cosGamma) / sinGamma\n\n lMatrix[2, 2] = c * np.sqrt(\n 1 + 2 * cosAlpha * cosBeta * cosGamma -\n cosAlpha**2 - cosBeta**2 - cosGamma**2\n ) / 
sinGamma\n\n # OI/HKL convention - x // [10-10], y // a2 [-12-10]\n # TSL convention - x // a1 [2-1-10], y // [01-10]\n if convention is None:\n convention = defaults['crystal_ortho_conv']\n\n if convention.lower() in ['hkl', 'oi']:\n # Swap 00 with 11 and 01 with 10 due to how OI orthonormalises\n # From Brad Wynne\n t1 = lMatrix[0, 0]\n t2 = lMatrix[1, 0]\n\n lMatrix[0, 0] = lMatrix[1, 1]\n lMatrix[1, 0] = lMatrix[0, 1]\n\n lMatrix[1, 1] = t1\n lMatrix[0, 1] = t2\n\n elif convention.lower() != 'tsl':\n raise ValueError(\n f\"Unknown convention '{convention}' for orthonormalisation of \"\n f\"crystal structure, can be 'hkl' or 'tsl'\"\n )\n\n # Set small components to 0\n lMatrix[np.abs(lMatrix) < 1e-10] = 0\n\n return lMatrix", "def MillerNormalVectors_111():\r\n k_111 = np.array(\r\n [[1, 1, 1], [-1, 1, 1], [1, -1, 1], [1, 1, -1]])/np.sqrt(3.0)\r\n return np.array([k_111[0, :], k_111[1, :], k_111[2, :], k_111[3, :],\r\n -k_111[0, :], -k_111[1, :], -k_111[2, :], -k_111[3, :]])", "def prepare_laplacian(laplacian):\n\n def estimate_lmax(laplacian, tol=5e-3):\n r\"\"\"Estimate the largest eigenvalue of an operator.\"\"\"\n lmax = sparse.linalg.eigsh(laplacian, k=1, tol=tol,\n ncv=min(laplacian.shape[0], 10),\n return_eigenvectors=False)\n lmax = lmax[0]\n lmax *= 1 + 2 * tol # Be robust to errors.\n return lmax\n\n def scale_operator(L, lmax, scale=1):\n r\"\"\"Scale the eigenvalues from [0, lmax] to [-scale, scale].\"\"\"\n I = sparse.identity(L.shape[0], format=L.format, dtype=L.dtype)\n L *= 2 * scale / lmax\n L -= I\n return L\n\n lmax = estimate_lmax(laplacian)\n laplacian = scale_operator(laplacian, lmax)\n\n laplacian = sparse.coo_matrix(laplacian)\n\n # PyTorch wants a LongTensor (int64) as indices (it'll otherwise convert).\n indices = np.empty((2, laplacian.nnz), dtype=np.int64)\n np.stack((laplacian.row, laplacian.col), axis=0, out=indices)\n indices = torch.from_numpy(indices)\n\n laplacian = torch.sparse_coo_tensor(indices, laplacian.data, laplacian.shape)\n laplacian = laplacian.coalesce() # More efficient subsequent operations.\n return laplacian", "def normalized(self):\n len = self.length\n return Vector(self.x / len, self.y / len)", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def _normalize(X: np.ndarray) -> np.ndarray:\n # return X * np.sqrt(1 / np.sum(X ** 2, axis=1))[:, None]\n return X * np.sqrt(X.shape[1] / np.sum(X ** 2, axis=1))[:, None]", "def _prepare_outer_matrix(self):\n self._mat_plane = numpy.array([\n self._scaling[0], 0, 0, 0,\n 0, self._scaling[1], 0, 0,\n 0, 0, 1, 0,\n self.i_border[0], -self.i_border[1], 0, 1\n ], dtype=numpy.float32)", "def _normalize(a: np.ndarray, u: float=0, s: float=1) -> np.ndarray:\n a_norm = (a - np.mean(a)) / (np.std(a) + STABILITY)\n a_rescaled = a_norm * s + u\n\n return a_rescaled", "def rownorm(a):\n return sum(array(a)**2,axis=1)**.5", "def normalize(self):\n\n if not self.magnitude():\n return Vector(0, 0)\n\n l = 1 / self.magnitude()\n return self.scale(l)", "def normalized(self):\n length = self.length\n if length != 0:\n return self/length\n return Vec2d(self)", "def normalize(self):\n self.vector /= np.linalg.norm(self.vector)", "def normalisation_l_inf(x):\n res = np.zeros(x.shape)\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n res[i,j] = x[i,j]/(np.max(x[i,j])+1e-5)\n return(res)", "def get_norma(self):\n return self.norma", "def 
normalize(input_matrix):\n\n row_sums = input_matrix.sum(axis=1)\n try:\n assert (np.count_nonzero(row_sums)==np.shape(row_sums)[0]) # no row should sum to zero\n except Exception:\n raise Exception(\"Error while normalizing. Row(s) sum to zero\")\n new_matrix = input_matrix / row_sums[:, np.newaxis]\n return new_matrix", "def normalize_matrix(matrix, min_val, max_val):\n return (max_val - min_val) * (matrix - np.min(matrix)) / (np.max(matrix) - np.min(matrix)) + min_val", "def L1Norm(X):\n return max(np.sum(X,axis=0))", "def test_LU(self):\n A = np.random.rand(10, 10)\n MA = to_matrix(A)\n ML, MU = MA.decomposeLU()\n self.assertEqual(ML*MU, MA)\n self.assertTrue(ML.is_lower_triangular())\n self.assertTrue(MU.is_upper_triangular())", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalization_calculation(self) -> None:\n self.normalized_inventory = (\n self.normalization_matrix * self.characterized_inventory\n )", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def normalize(x):\n\n x_norm = np.linalg.norm(x, axis=1, keepdims=True)\n print(x_norm)\n x = x / x_norm\n ### END\n\n return x", "def norm(self):", "def normalize(self):\n self.length = np.ones(self.nV)\n return self", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n norm2 = np.linalg.norm(x,2,axis = 1).reshape(x.shape[0],-1)\n x = x/norm2\n ### END YOUR CODE\n\n return x", "def lap_normalize(img, scale_n=4):\n # img = tf.expand_dims(img, 0)\n # print(\"Inside lap_normalize Function, img shape {}\".format(tf.shape(img)))\n\n tlevels = lap_split_n(img, scale_n)\n tlevels = list(map(normalize_std, tlevels))\n\n out = lap_merge(tlevels)\n\n return out[0, :, :, :]", "def unnormalize(self, inputs: Tensor) -> Tensor:\n # The normalizing constants are applied to the entire row, so add dummy outputs to the\n # inputs to make a row.\n row = torch.zeros_like(self._row_range)\n row[self._input_column_indices] = inputs\n row *= self._row_range\n row += self._row_min\n return row[self._input_column_indices]", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def normalize(self):\n self.desc += \", normalize\"\n self._vecs /= np.linalg.norm(self._vecs, axis=1)[:, np.newaxis]\n self.reindex()", "def lup_decomposition(self):\n p = [i for i in range(self.rows_count())]\n for i in range(self.rows_count() - 1):\n pivot = i\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[j], i]) > abs(self[p[pivot], i]):\n pivot = j\n p[pivot], p[i] = p[i], p[pivot]\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[i], i]) < math.pow(10, -6):\n raise ValueError(\"Can't divide by 0\")\n self[p[j], i] /= self[p[i], i]\n for k in range(i + 1, self.rows_count()):\n self[p[j], k] -= self[p[j], i] * self[p[i], k]\n lst = []\n for i in p:\n lst.append(self.value[i])\n return p, Matrix(lst)", "def norm(self):\r\n old_origin = np.array(self.origin)\r\n self.origin = [0, 0, 0]\r\n old_origin[0] = old_origin[0] / self.x[0]\r\n old_origin[1] = old_origin[1] / self.y[1]\r\n old_origin[2] = old_origin[2] / self.z[2]\r\n self.data = ndimage.shift(self.data, -old_origin, mode='wrap')", "def normalize(self):\n self._vectors = [vector.normalized() for vector in self._vectors]", "def nll(self, data):\n nll = 
np.zeros(data.shape[0])\n self.a[0] = self.c\n for i in range(self.Ndim):\n a = self.rhos[i] * self.a[i]\n h = 0.5 * (a + np.abs(a)) # ReLU\n za = np.dot(self.Vs['alpha'][i].T, h) + self.bs['alpha'][i]\n zm = np.dot(self.Vs['mu'][i].T, h) + self.bs['mu'][i]\n zs = np.dot(self.Vs['sigma'][i].T, h) + self.bs['sigma'][i]\n self.alphas[i] = softmax(za)\n self.mus[i] = zm\n self.sigmas[i] = np.exp(zs)\n self.vars = self.sigmas ** 2.\n nll += nll_MOG_1D(data[:, i], self.alphas[i], self.mus[i],\n self.vars[i])[0]\n return nll", "def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already computed\r\n return self.laplace_op*array_in", "def normalize_lorentz_2d(\n lorentz, old_f_scale, old_v_scale, new_f_scale=(\n 0, 1, 1024), new_v_scale=(\n 0, 1, 1024)):\n lorentz_array = np.empty((0, 4))\n for i in range(0, lorentz.shape[0]):\n l0 = lorentz[i]\n l1 = normalize_lorentz_1d(\n l0, old_f_scale, old_v_scale, new_f_scale, new_v_scale)\n lorentz_array = np.append(lorentz_array, np.array([l1]), axis=0)\n return lorentz_array", "def normalise(a):\n return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))" ]
[ "0.6905368", "0.6712484", "0.6235044", "0.62139124", "0.62099594", "0.6200239", "0.61991894", "0.6155274", "0.6130805", "0.6119615", "0.6119615", "0.6039203", "0.594853", "0.5903943", "0.5903158", "0.58970135", "0.5886368", "0.58848", "0.58638287", "0.58587956", "0.58482677", "0.5803521", "0.5797989", "0.57870847", "0.5773412", "0.5770112", "0.5744329", "0.5739798", "0.5731862", "0.5724902", "0.5692082", "0.56882465", "0.56730175", "0.5665962", "0.5628615", "0.56279975", "0.5624041", "0.56159663", "0.56096995", "0.56046546", "0.55955446", "0.5574497", "0.55629605", "0.55278856", "0.550388", "0.55023104", "0.5484754", "0.54839694", "0.54776424", "0.5467214", "0.5464804", "0.5450003", "0.5447569", "0.5443797", "0.5432788", "0.5427415", "0.5418077", "0.54159373", "0.5415215", "0.54135424", "0.5413237", "0.54075545", "0.54046065", "0.5399458", "0.53968114", "0.5394406", "0.5386516", "0.53770787", "0.5357393", "0.5357184", "0.53548867", "0.5351279", "0.5349227", "0.5348093", "0.5346608", "0.5345797", "0.53454036", "0.5345001", "0.534491", "0.5343089", "0.5334035", "0.5331855", "0.53261834", "0.5325881", "0.5324536", "0.5322077", "0.53219295", "0.53175217", "0.5317103", "0.53123534", "0.5306175", "0.5303099", "0.5301771", "0.5301258", "0.5301113", "0.528586", "0.5285603", "0.5284992", "0.5280618", "0.5273994" ]
0.56505066
34
Solve for several eigenvectors.
def solve_for_eigenvectors(matrix, num, mode="general"): # Construct a sparse matrix if mode == "general": return linalg.eigs(matrix, num) if mode == "symmetric": return linalg.eigsh(matrix, num)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eigsolve(self,**kwargs):\n return eigsolve(self,**kwargs)", "def _solve_eigen(self, X, y):\n self.means_, self.neighbor_means_ = _class_means_and_neighbor_means(\n X, y, self.within_between_ratio, self.nearest_neighbor_ratio)\n \n Sw = _class_cov(X, y) # within class cov\n Sb = _local_pairwise_cov(self.means_, self.neighbor_means_)\n \n evals, evecs = linalg.eigh(Sb, Sw)\n evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors\n self.scalings_ = np.asarray(evecs)", "def eigensolve(self, epsilon=0.85):\n raise NotImplementedError(\"eigensolve Incomplete\")", "def eig(self,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num).toarray()\n eigvals, eigvecs = eigh(ham)\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(eigvals.size):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def calc_eigenvectors(verts: [[float]]):\n A = np.zeros((3, len(verts)))\n\n A[0] = np.array([x[0] for x in verts]) # First row is all the X_coords\n A[1] = np.array([x[1] for x in verts]) # second row is all the Y_coords\n A[2] = np.array([x[2] for x in verts]) # third row is all the z-coords\n \n A_cov = np.cov(A) # This is returns a 3x3\n eigenvalues, eigenvectors = np.linalg.eigh(A_cov)\n\n return eigenvalues, eigenvectors", "def numpy_eigenvectors(A):\n import numpy\n A = numpy.array(A)\n E, V = numpy.linalg.eigenvectors(A)\n import Numeric\n E = Numeric.array(E)\n V = Numeric.array(V)\n return E, V", "def plot_eigenvectors(self, **parameter_overrides):\n par, arr_keys, _ = self._parse_parameter_overrides(\n **parameter_overrides)\n states = [r'\\phi', r'\\delta', r'\\dot{\\phi}', r'\\dot{\\delta}']\n\n eval_seq, evec_seq = self.calc_eigen(**parameter_overrides)\n eval_seq, evec_seq = np.atleast_2d(eval_seq), np.atleast_3d(evec_seq)\n\n if eval_seq.shape[0] > 10:\n msg = ('Plots will be too large, use fewer than 11 values in the '\n 'varied parameter.')\n raise ValueError(msg)\n\n # TODO : sort_eigenmodes() is doing something funny and not adding the\n # 4th eigenvalue, so you often end up with duplicates eigenvalues for\n # one eigenval and one missing. Also the algorithm may not work well\n # with spaced out eigenvalues, which is what I've been trying here. You\n # may have to calculate eigenvals/vec across closer spacing, then\n # sample out the ones you want. 
For now, we don't sort coarse spaced\n # eigenvalues.\n # if arr_keys:\n # eval_seq, evec_seq = sort_eigenmodes(eval_seq, evec_seq)\n\n fig, axes = plt.subplots(*eval_seq.shape,\n subplot_kw={'projection': 'polar'})\n axes = np.atleast_2d(axes)\n fig.set_size_inches(axes.shape[1]*3, axes.shape[0]*3)\n lw = list(range(1, len(states) + 1))\n lw.reverse()\n\n for k, (evals, evecs, par_val) in enumerate(zip(eval_seq, evec_seq,\n par[arr_keys[0]])):\n\n axes[k, 0].set_ylabel('{} = {:1.2f}'.format(arr_keys[0], par_val),\n labelpad=30)\n\n for i, (eigenval, eigenvec) in enumerate(zip(evals, evecs.T)):\n\n max_com = np.abs(eigenvec[:2]).max()\n\n for j, component in enumerate(eigenvec[:2]):\n\n radius = np.abs(component)/max_com\n theta = np.angle(component)\n axes[k, i].plot([0, theta], [0, radius], lw=lw[j])\n\n axes[k, i].set_rmax(1.0)\n msg = r'Eigenvalue: {:1.3f}'\n if eigenval.real >= 0.0:\n fontcolor = 'red' # red indicates unstable\n else:\n fontcolor = 'black'\n axes[k, i].set_title(msg.format(eigenval),\n fontdict={'color': fontcolor})\n\n axes[0, 0].legend(['$' + s + '$' for s in states],\n loc='upper center', bbox_to_anchor=(0.5, 1.05),\n fancybox=True, shadow=True, ncol=4)\n\n fig.tight_layout()\n\n return axes", "def calculate_eigens(self):\n covariance_matrix = np.cov(self.predictor_vars_train.T)\n eigenvalues, eigenvectors = np.linalg.eig(covariance_matrix)\n idx = eigenvalues.argsort()[::-1]\n # Create \"All\" version\n self.eigenvalues_all = eigenvalues[idx]\n self.eigenvectors_all = eigenvectors[:, idx]\n # Create selected percentage version with cutoff\n eigenvalues_pct = self.eigenvalues_all / np.sum(self.eigenvalues_all)\n self.pct_var_exp_cumulative_all = np.cumsum(eigenvalues_pct)\n self.pct_var_exp_cumulative = self.pct_var_exp_cumulative_all[\n self.pct_var_exp_cumulative_all <= self.variance_explained_cutoff\n ]\n self.eigenvectors = self.eigenvectors_all[:, : len(self.pct_var_exp_cumulative)]\n self.eigenvalues = self.eigenvalues_all[: len(self.pct_var_exp_cumulative)]", "def eigvals(self):\n raise NotImplementedError", "def compute_eigvals(*params, **hyperparams):\n phi = params[0]\n d, t = hyperparams[\"dimension\"]\n\n if qml.math.get_interface(phi) == \"tensorflow\":\n phase = qml.math.exp(1j * qml.math.cast_like(phi, 1j))\n minus_phase = qml.math.exp(-1j * qml.math.cast_like(phi, 1j))\n return stack_last([phase if index < d else minus_phase for index in range(t)])\n\n arg = 1j * phi\n prefactors = qml.math.array([1 if index < d else -1 for index in range(t)], like=phi)\n\n if qml.math.ndim(phi) == 0:\n product = arg * prefactors\n else:\n product = qml.math.outer(arg, prefactors)\n return qml.math.exp(product)", "def get_eigenvectors(self):\n return self.eigenVectors", "def GetEigenvectors(self):\n\t\treturn self.Solver.GetEigenvectors()", "def eigen(M):\n values, vectors = np.linalg.eig(M)\n return values, vectors", "def eigensys(self, evals_count):\n hamiltonian_mat = self.hamiltonian()\n evals, evecs = hamiltonian_mat.eigenstates(eigvals=evals_count)\n return evals, evecs", "def mesh_laplacian_eigenvectors(mesh, nb_vectors=1):\n lap, lap_b = compute_mesh_laplacian(mesh, lap_type=\"fem\")\n w, v = eigsh(lap.tocsr(), nb_vectors + 1, M=lap_b.tocsr(), sigma=solver_tolerance)\n return v[:, 1:]", "def eigvals(input):\n\n is_input_dparray = isinstance(input, dparray)\n\n if (not use_origin_backend(input) and is_input_dparray):\n if (input.size > 0):\n return dpnp_eigvals(input)\n\n return call_origin(numpy.linalg.eigvals, input)", "def 
eigs(self,num_eigvals,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num)\n eigvals, eigvecs = eigsh(ham,k=num_eigvals*num_sites,which='SM')\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(num_eigvals):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def test_eigen_multiple_neighborhoods(self):\n # vectorized version\n t0 = time.time()\n extract_vect = EigenValueVectorizeFeatureExtractor()\n eigvals_vect = extract_vect.extract(self.point_cloud, self.neigh, None, None, None)\n print('Timing Vectorize : {}'.format((time.time() - t0)))\n eigvals_vect = np.vstack(eigvals_vect[:3]).T\n\n # serial version\n eigvals = []\n t0 = time.time()\n for n in self.neigh:\n extract = EigenValueSerial()\n eigvals.append(extract.extract(self.point_cloud, n, None, None, None))\n print('Timing Serial : {}'.format((time.time() - t0)))\n eigvals = np.array(eigvals)\n\n np.testing.assert_allclose(eigvals_vect, eigvals)", "def get_eigenvectors(self):\n return self._eigenvectors", "def MakeEigenVectors( self ): \n sqrt2 = np.sqrt(2)\n Isqrt2 = 1.0 / sqrt2\n EVectors = np.asarray( [ [ Isqrt2 , Isqrt2 , 0 ] ,\n [ Isqrt2 ,-Isqrt2 , 0 ] , \n [ 0 , 0 , 1 ] ] )\n for i in range( self.NQ ):\n for j in range( self.Nbranches ):\n self.EigenVectors[ i , j , 0 , : ] = EVectors[ j , : ]", "def eigenpairs(mesh, nb_eig):\n lap, lap_b = sdg.compute_mesh_laplacian(mesh, lap_type='fem')\n eig_val, eig_vec = eigsh(lap.tocsr(), nb_eig, M=lap_b.tocsr(),\n sigma=1e-6, which='LM')\n return eig_val, eig_vec, lap_b.tocsr()", "def calculate_eigenvalues(H):\n eigenvalues, eigenvectors = np.linalg.eigh(H)\n return eigenvalues, eigenvectors", "def calculate_eigenvalues(self):\n self.__eigenvalues = []\n dictionary = np.linalg.eig(np.array(self.__A))\n indicator = True\n sum1 = 0\n for i in range(self.__A.shape[0]):\n if all(self.__A[i, j] == 0 for j in range(self.__A.shape[1])):\n indicator = all(self.__B[i,j] for j in range(self.__B.shape[1]))\n if (indicator):\n sum1 += 1\n \n for val in dictionary[0]:\n if (val != 0):\n self.__eigenvalues.append(complex(val))\n elif (indicator) and (sum1 > 0):\n sum1 -= 1\n self.__eigenvalues.append(complex(val))", "def project_to_eigenvectors(X, vecs):\n\n return (X-np.mean(X, axis=0)).dot(np.transpose(vecs)) #PCA assumes that the data is centered, so we need to do that before doing the calculations", "def run_exact(self):\n self.operator, var_form, opt = self.generate_VQE_args()\n\n 
exact_eigensolver = ExactEigensolver(self.operator, k=1)\n self.result = exact_eigensolver.run()\n\n solution = self.extract_solution(self.result, True)\n return solution", "def solve(self, sparse_args=None):\n Hmat = self.Hamiltonian()\n if sparse_args is not None: self.sparse_args = sparse_args\n if self.sparse_args is None:\n en, ev = eig(Hmat.todense())\n else:\n en, ev = eigsh(Hmat, **self.sparse_args)\n ev = transpose(array(ev))[argsort(en)]\n en = sort(en)\n self.en = en\n self.ev = ev\n self.solved = True\n return self.en, self.ev", "def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = V[:, ::-1][:, :self.num_xi]", "def compute_eigendecomp(self, atol=1e-13, rtol=None):\n self.eigvals, self.eigvecs = parallel.call_and_bcast(\n util.eigh, self.correlation_array, atol=atol, rtol=rtol,\n is_positive_definite=True)", "def calc_eigen(self, left=False, **parameter_overrides):\n A, B = self.form_state_space_matrices(**parameter_overrides)\n\n if len(A.shape) == 3: # array version\n evals = np.zeros(A.shape[:2], dtype='complex128')\n evecs = np.zeros(A.shape, dtype='complex128')\n for i, Ai in enumerate(A):\n if left:\n Ai = np.transpose(Ai)\n evals[i], evecs[i] = np.linalg.eig(Ai)\n return evals, evecs\n else:\n if left:\n A = np.transpose(A)\n return np.linalg.eig(A)", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v", "def eig_vals_vects(matrix, sort='imag', not_hermitian=True, verbose=False):\n # if len(matrix) < 10:\n # print '\\nFinding eigvals, matrix = ', matrix\n\n # check if hermitian:\n if not_hermitian:\n eigval, eigvect = np.linalg.eig(matrix)\n else:\n if (matrix == matrix.conj().T).all():\n if verbose:\n print 'Shortcut eigvect/vals since matrix is hermitian...'\n eigval, eigvect = np.linalg.eigh(matrix)\n else:\n if verbose:\n print 'matrix is not hermitian...'\n eigval, eigvect = np.linalg.eig(matrix)\n\n # use imaginary part to get ascending order of eigvals\n if sort == 'imag':\n si = np.argsort(np.imag(eigval))\n elif sort == 'real':\n si = np.argsort(np.real(eigval))\n else:\n si = np.arange(len(eigval))\n\n eigvect = np.array(eigvect)\n eigvect_out = eigvect.T[si]\n eigval_out = eigval[si]\n\n # if len(eigval_out) < 10:\n # print 'eigvals return as =', eigval_out\n\n return eigval_out, eigvect_out", "def get_eigvals_eigvects(\n num_layers,\n numeric_matrices_eV_over_angsquared,\n layer_mass_amu,\n use_banded_algorithm=False,\n):\n # Based on the units in input, and indicating with:\n # - [hbar omega] the numeric value for the frequency in meV => hbar omega = [hbar omega] * meV\n # - [K] the numeric value of K in eV/ang^2\n # - [m] the layer mass in amu\n # we have (we omit the sign, and for units considerations we 'drop' U):\n # omega^2 = K / m =>\n # (hbar omega)^2 = hbar^2 * K / m =>\n # [hbar omega]^2 * meV^2 = hbar^2 * [K] / [m] * eV/ang^2 / amu = [K] / [m] * hbar^2 * eV/ang^2 / amu =>\n # [hbar omega]^2 = = [K] / [m] * ( hbar^2 * eV/ang^2 / amu / meV^2 )\n # so that the conversion factor is the last bracketed term:\n # conversion_factor = hbar^2 * eV / (angstrom^2 * amu * meV^2)\n conversion_factor = 4180.15925\n # NOTE: for simplicity, the conversion is applied at the very end\n\n if use_banded_algorithm:\n # 3 blocks (below, same layer, and 
above) of size 3 => total width of 9\n # Since we only store the upper part, we only need a width of 4 (diagonal + 3 superdiagonals)\n K_matrix = np.zeros((4, num_layers * 3))\n else:\n K_matrix = np.zeros((num_layers * 3, num_layers * 3))\n\n # Note: I construct -K, actually\n for block_idx in range(num_layers):\n # Interaction with upper layer\n if block_idx < num_layers - 1: # Not in the last layer\n current_block = np.array(\n numeric_matrices_eV_over_angsquared[\n block_idx % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx + 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n # Interaction with lower layer\n if block_idx > 0: # Not in the first layer\n previous_block = np.array(\n numeric_matrices_eV_over_angsquared[\n (block_idx - 1) % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx - 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n\n # We want to get the eigenvalues of omega^2 U = - 1/M_layer K U\n K_matrix /= layer_mass_amu\n\n # Get frequencies (eigvals) and eigenvectors (for mode analysis)\n if use_banded_algorithm:\n eigvals, eigvects = scipy.linalg.eig_banded(K_matrix, lower=False)\n else:\n eigvals, eigvects = np.linalg.eigh(K_matrix)\n\n eigvals *= conversion_factor\n\n ## The first three should be acoustic i.e. almost zero; the rest should be positive\n ## I don't check as depending on the units it's hard to define a correct absolute energy\n # assert np.sum(np.abs(eigvals[:3])) < 1.0e-8\n\n # Remove the first three acoustic modes\n return eigvals[3:], eigvects[:, 3:]", "def eigenvalue_analysis(*, dates, obs_data, model_data, residuals,\n proxy_number=1):\n # Create a masked version of the residuals array so that we can perform the\n # PCA ignoring all nan values\n masked_residuals = np.ma.array(residuals, mask=np.isnan(residuals))\n\n # Calculate the covariance matrix of the masked residuals array\n covariance_matrix = np.ma.cov(masked_residuals, rowvar=False,\n allow_masked=True)\n # Calculate the eigenvalues and eigenvectors of the covariance matrix\n eig_values, eig_vectors = np.linalg.eigh(covariance_matrix)\n # Sort the eigenvalues in decreasing order\n idx = np.argsort(np.abs(eig_values))[::-1]\n eig_values = eig_values[idx]\n # Sort the eigenvectors according to the same index\n eig_vectors = eig_vectors[:, idx]\n\n # Project the residuals onto the eigenvectors\n projected_residuals = np.ma.dot(masked_residuals, eig_vectors)\n\n # Use the method of Cox et al (2018) to remove unmodelled external\n # signal in the SV residuals. 
The variable 'proxy' contains the sum\n # of the SV residuals projected into the number of dominant PCs\n # specified by proxy_number\n\n corrected_residuals = []\n\n if proxy_number == 1:\n noisy_direction = eig_vectors[:, 0]\n proxy = projected_residuals[:, 0]\n for idx in range(len(proxy)):\n corrected_residuals.append(\n masked_residuals.data[idx, :] - proxy[idx] * noisy_direction)\n # Apply the denoising algorithm to each of the PCs specified\n elif proxy_number > 1:\n noisy_direction = eig_vectors[:, 0:proxy_number]\n proxy = np.sum(projected_residuals[:, 0:proxy_number], axis=1)\n for idx in range(len(projected_residuals[:, 0])):\n corrected = masked_residuals.data[idx, :]\n for direction in range(proxy_number):\n corrected = corrected - projected_residuals[idx, direction] \\\n * noisy_direction[:, direction]\n corrected_residuals.append(corrected)\n\n corrected_residuals = pd.DataFrame(corrected_residuals,\n columns=obs_data.columns)\n # Re-form the SV from the denoised residuals\n denoised_sv = pd.DataFrame(\n corrected_residuals.values + model_data.values,\n columns=obs_data.columns)\n\n denoised_sv.insert(0, 'date', dates)\n\n return denoised_sv, proxy, np.abs(eig_values), eig_vectors,\\\n projected_residuals, corrected_residuals.astype('float'),\\\n covariance_matrix", "def computemodes(self):\n evals, modes = np.linalg.eig(self.A)\n return evals, modes", "def run_vqd(\n self,\n backend=Aer.get_backend(\"statevector_simulator\"),\n var_form=None,\n optimizer=None,\n reps=2,\n # reps=5,\n ):\n tmp = HermitianSolver(self.mat)\n max_eigval, vqe_result, vqe = tmp.run_vqe(\n backend=backend,\n var_form=var_form,\n optimizer=optimizer,\n reps=reps,\n mode=\"max_val\",\n )\n eigvals = [max_eigval]\n eigstates = [vqe_result.eigenstate]\n for r in range(len(tmp.mat) - 1):\n val, vqe_result, vqe = tmp.run_vqe(\n backend=backend,\n var_form=var_form,\n optimizer=optimizer,\n reps=reps,\n )\n outer_prod = np.outer(\n vqe_result.eigenstate, np.conj(vqe_result.eigenstate).T\n )\n tmp.mat = tmp.mat - (val - max_eigval) * outer_prod\n eigvals.append(val)\n eigstates.append(vqe_result.eigenstate)\n tmp = HermitianSolver(tmp.mat)\n\n eigvals = np.array(eigvals)\n eigstates = np.array(eigstates)\n order = np.argsort(eigvals)\n eigvals = eigvals[order]\n eigstates = eigstates[order]\n return eigvals, eigstates", "def eigen_vector_i_all(self):\n return self._eig_vec", "def eigen(X):\n\n symmetric = np.alltrue(np.isclose(X - X.T, np.zeros(n)))\n small = max(X.shape) <= 11\n\n if symmetric:\n return jacobi(X)\n elif small:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm3(X, maxiter=maxiter)\n else:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm2(X, maxiter=maxiter)", "def FindEigenstates(**args):\n\tprop = SetupProblem(**args)\n\n\t#use custom initial residual if provided\n\tinitialResidual = args.get(\"initialResidual\")\n\tif initialResidual != None:\n\t\tprop.psi.GetData()[:] = initialResidual.GetData()\n\n\t#find eigenstates\n\t#solver = pyprop.ArpackSolver(prop)\n\tsolver = pyprop.PiramSolver(prop)\n\tsolver.Solve()\n\treturn solver", "def _solve_principal_eig(a):\n w, v = np.linalg.eig(a)\n idx = np.argmax(w)\n eig_val = w[idx]\n eig_vec = v[:, idx]\n\n # Let eig_vec non-negative\n sign = 0\n i = 0\n while sign == 0 and i < len(eig_vec):\n sign = np.sign(eig_vec[i])\n i += 1\n if sign < 0:\n eig_vec *= -1\n\n return eig_val, eig_vec", "def eigen_decomposition(X, features):\n # Center to average\n Xctr = X - X.mean(0)\n # covariance matrix\n Xcov = np.cov(Xctr.T)\n\n # Compute eigenvalues and 
eigenvectors\n eigen_values, eigen_vectors = sp.linalg.eigh(Xcov)\n\n # Sort the eigenvalues and the eigenvectors descending\n sortedindex = np.argsort(eigen_values)[::-1]\n eigen_values = eigen_values[sortedindex]\n eigen_vectors = eigen_vectors[:, sortedindex]\n\n ###########\n y_pos = np.arange(len(features))\n weight = eigen_vectors[0]\n\n figure, axis = plt.subplots(2, 1)\n\n axis[0].bar(features, eigen_vectors[0])\n plt.setp(axis[0], title=\"First and Second Component's Eigenvectors \", ylabel='Weight')\n axis[0].set_xticks(features, features)\n axis[1].bar(features, eigen_vectors[1])\n axis[1].set_xticks(features, features)\n plt.setp(axis[1], ylabel='Weight')\n # axis[0].bar(y_pos, weight, align='center', alpha=0.5)\n # axis[0].xticks(y_pos, features)\n # axis[0].ylabel('Weight')\n # axis[0].title('Features')\n #\n # axis[1].bar(y_pos, weight, align='center', alpha=0.5)\n # axis[1].xticks(y_pos, features)\n # axis[1].ylabel('Weight')\n # axis[1].title('Features')\n\n plt.show()\n # return eigen_values, eigen_vectors", "def eig_vals_vects(self, matrix=None, attribute=False, attribute_matrix=False):\n if matrix is None:\n matrix = self.get_matrix(attribute=attribute_matrix)\n eigval, eigvect = np.linalg.eig(matrix)\n si = np.argsort(np.imag(eigval))\n eigvect = np.array(eigvect)\n eigvect = eigvect.T[si]\n eigval = eigval[si]\n\n if attribute:\n self.eigvect = eigvect\n self.eigval = eigval\n\n # print 'np.shape(eigvect) = ', np.shape(eigvect)\n # sys.exit()\n return eigval, eigvect", "def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs", "def get_eigen_values_and_vectors(matrix, num_values):\n w, v = eigen_decomp(matrix)\n eigen_values = []\n eigen_vectors = []\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return eigen_values, eigen_vectors", "def scipy_eigsolver(\n kernel_matrix: Union[np.ndarray, scipy.sparse.csr_matrix],\n n_eigenpairs: int,\n is_symmetric: bool,\n is_stochastic: bool,\n):\n\n n_samples, n_features = kernel_matrix.shape\n\n # check only for n_eigenpairs == n_features and n_eigenpairs < n_features\n # wrong parametrized n_eigenpairs are catched in scipy functions\n if n_eigenpairs == n_features:\n if is_symmetric:\n scipy_eigvec_solver = scipy.linalg.eigh\n else:\n scipy_eigvec_solver = scipy.linalg.eig\n\n solver_kwargs: Dict[str, object] = {\n \"check_finite\": False\n } # should be already checked\n\n else: # n_eigenpairs < matrix.shape[1]\n if is_symmetric:\n scipy_eigvec_solver = scipy.sparse.linalg.eigsh\n else:\n scipy_eigvec_solver = scipy.sparse.linalg.eigs\n\n solver_kwargs = {\n \"k\": n_eigenpairs,\n \"which\": \"LM\",\n \"v0\": np.ones(n_samples),\n \"tol\": 1e-14,\n }\n\n # The selection of sigma is a result of a microbenchmark\n if is_symmetric and is_stochastic:\n # NOTE: it turned out that for self.kernel_.is_symmetric=False (-> eigs),\n # setting sigma=1 resulted into a slower computation.\n NUMERICAL_EXACT_BREAKER = 0.1\n solver_kwargs[\"sigma\"] = 1.0 + NUMERICAL_EXACT_BREAKER\n solver_kwargs[\"mode\"] = \"normal\"\n else:\n solver_kwargs[\"sigma\"] = None\n\n # the scipy solvers only work on floating points\n if scipy.sparse.issparse(\n kernel_matrix\n ) and kernel_matrix.data.dtype.kind not in [\"fdFD\"]:\n kernel_matrix = kernel_matrix.asfptype()\n elif isinstance(kernel_matrix, np.ndarray) and kernel_matrix.dtype != \"f\":\n kernel_matrix = kernel_matrix.astype(float)\n\n eigvals, eigvects = scipy_eigvec_solver(kernel_matrix, **solver_kwargs)\n\n return eigvals, eigvects", "def fit(self, X):\n mean_vec = 
X.mean(axis=0)\n cov = (X - mean_vec).T.dot((X - mean_vec)) / (X.shape[0]-1)\n eigenvalues, eigenvectors = np.linalg.eig(cov)\n idx = np.abs(eigenvalues)\n self.best_eigenvalues = idx[:self.n_components]\n self.best_eigenvalues = eigenvectors[:, :self.n_components]\n eigenvectors = np.atleast_1d(eigenvectors)[:, :self.n_components]", "def getEigenVectors(X_new, elambda):\n X_cov = X_new.T.dot(X_new)\n eigenvalues, eigenvectors = np.linalg.eig(X_cov)\n eigen_sorted_ind = eigenvalues.argsort()[::-1]\n eigenvalues = eigenvalues / eigenvalues.sum()\n eigenvalues_sum = 0\n eigenvectors_selected = eigenvectors\n for i in xrange(len(eigen_sorted_ind)):\n eigenvalues_sum += eigenvalues[eigen_sorted_ind[i]]\n if eigenvalues_sum >= elambda:\n eigenvectors_selected = eigenvectors[:, eigen_sorted_ind[0:i]]\n break\n return eigenvectors_selected", "def compute_eigvals(theta, pauli_word): # pylint: disable=arguments-differ\n if qml.math.get_interface(theta) == \"tensorflow\":\n theta = qml.math.cast_like(theta, 1j)\n\n # Identity must be treated specially because its eigenvalues are all the same\n if set(pauli_word) == {\"I\"}:\n exp = qml.math.exp(-0.5j * theta)\n ones = qml.math.ones(2 ** len(pauli_word), like=theta)\n if qml.math.get_interface(theta) == \"tensorflow\":\n ones = qml.math.cast_like(ones, 1j)\n\n if qml.math.ndim(theta) == 0:\n return exp * ones\n\n return qml.math.tensordot(exp, ones, axes=0)\n\n return MultiRZ.compute_eigvals(theta, len(pauli_word))", "def get_eigen_values_and_vectors(matrix, num_values):\n (w, v) = eigen_decomp(matrix)\n eigen_values = []\n eigen_vectors = []\n ### YOUR CODE HERE\n max_indexs=np.argpartition(w, -num_values)\n max_indexs=max_indexs[-num_values:]\n ids=np.argsort(w[max_indexs])\n sort_index=max_indexs[ids]\n eigen_values=w[sort_index]\n eigen_vectors=v[:,sort_index]\n ### END YOUR CODE\n return eigen_values, eigen_vectors", "def right_eigenvectors(matrix, nvals=None):\n matrix = np.asarray(matrix)\n return _eigenvectors(matrix, nvals)", "def fit_evd(self):\n\n # EVD only work on square matrices as we need to compute the eigenvalues and eigenvectors\n # For this we compute the covariance matrix K\n # K should be n x n matrix (pixels x pixels)\n\n # The covariance matrix is nxn\n self.cov_matrix = np.zeros(shape=[self.n_features, self.n_features], dtype='uint8')\n\n self.cov_matrix = np.cov(self.norm_matrix, rowvar=False)\n # C is a symmetric matrix and so it can be diagonalized:\n eig_val, eig_vec = linalg.eig(self.cov_matrix)\n\n # Sorting the eigenvectors by decreasing eigenvalues\n # [Start : stop : stepcount] stepcount is reversed\n idx = eig_val.argsort()[::-1]\n eig_val, eig_vec = eig_val[idx], eig_vec[:, idx]\n\n # Explained_variance tell us how much of the variance in the data each eigen value explains\n explained_variance = eig_val / (self.n_samples - 1)\n # total_var is the total variance in the data\n total_var = explained_variance.sum()\n explained_variance_ratio = explained_variance / total_var\n # The cumulative sum of all ratios\n ratio_cumsum = np.cumsum(explained_variance_ratio)\n\n # We search in the cumsum for the index of the value which, when added, corresponds to the quality_percent\n # The index of the cumsum gives us the components we need to add to explain X quality percent of our data\n n_components = np.searchsorted(ratio_cumsum, self.quality_percent, side='right') + 1\n\n self.components = eig_vec[:n_components]\n print(\"The principal components have been calculated using eigendecomposition\", self.components.shape)\n\n 
return self.components", "def test_svd(self):\n eigenvectors, eigenvalues = self.svd.run(self.test_matrix)\n\n self.assertEqual(eigenvectors.shape, (100, 100))\n self.assertEqual(eigenvalues.shape, (100,))", "def analytical_eig(A):\n n = len(A)\n h = 1/float(n)\n d = 2/float(h)**2\n a = -1/float(h)**2\n eigenval = np.empty(n)\n for j in range(1,n+1):\n eigenval[j-1] = d + 2*a*np.cos((j*np.pi)/(float(n)+1)) # Analytic solution\n \n return eigenval", "def readEigenvectors(self,phys,eigname):\r\n EigenvectorReader.EigenvectorReader(self.checkPath(eigname)).read(phys.myEig)", "def eigenvects(mat):\n # Check if symbols are present\n if hasSymbols(mat):\n return mat.eigenvects()\n # Purely numeric matrix\n newMat = recursiveEvaluate(mat.as_mutable())\n return newMat.eigenvects()", "def eig_vals_vects_hermitian(matrix, sort='imag'):\n # if len(matrix) < 10:\n # print '\\nFinding eigvals, matrix = ', matrix\n eigval, eigvect = np.linalg.eig(matrix)\n # use imaginary part to get ascending order of eigvals\n if sort == 'imag':\n si = np.argsort(np.imag(eigval))\n elif sort == 'real':\n si = np.argsort(np.real(eigval))\n else:\n si = np.arange(len(eigval))\n\n eigvect = np.array(eigvect)\n eigvect_out = eigvect.T[si]\n eigval_out = eigval[si]\n if len(eigval_out) < 10:\n print 'eigvals return as =', eigval_out\n return eigval_out, eigvect_out", "def explore_eigenvalues(self, X_train, X_test) :\r\n\r\n # scaler = StandardScaler()\r\n # X_train = scaler.fit_transform(X_train)\r\n # X_test = scaler.transform(X_test)\r\n\r\n print (\"Train feature shape:\", X_train.shape)\r\n print(\"Train feature shape:\", X_test.shape)\r\n \r\n\r\n mean_vec = np.mean(X_test, axis=0)\r\n cov_mat = (X_test - mean_vec).T.dot((X_test - mean_vec)) / (X_test.shape[0]-1)\r\n print('Covariance matrix \\n%s' %cov_mat)\r\n print('NumPy covariance matrix: \\n%s' %np.cov(X_test.T))\r\n eig_vals, eig_vecs = np.linalg.eig(cov_mat)\r\n print('Eigenvectors \\n%s' %eig_vecs)\r\n print(\"---------------------------------\")\r\n print('\\nEigenvalues \\n%s' %eig_vals)\r\n # Make a list of (eigenvalue, eigenvector) tuples\r\n eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]\r\n\r\n # Sort the (eigenvalue, eigenvector) tuples from high to low\r\n eig_pairs.sort(key=lambda x: x[0], reverse=True)\r\n\r\n # Visually confirm that the list is correctly sorted by decreasing eigenvalues\r\n print(\"---------------------------------\")\r\n print('Eigenvalues in descending order:')\r\n for i in eig_pairs:\r\n print(i[0])\r\n # tot = sum(eig_vals)\r\n # var_exp = [(i / tot)*100 for i in sorted(eig_vals, reverse=True)]\r\n # print(\"Explained variance : \\n{}\".format(var_exp))\r\n \r\n\r\n return X_train, X_test", "def eigsh(A, M = None, k = 6, sigma = None, which = 'LM', v0=None,\n ncv = None, maxiter = None, tol = 0., return_eigenvectors = True,\n Minv = None, OPinv = None, mode = 'normal'):\n if M is not None:\n raise NotImplementedError(\"M is not currently supported!\")\n if v0 is not None:\n raise NotImplementedError(\"v0 is not currently supported!\")\n if ncv is not None:\n raise NotImplementedError(\"ncv is not currently supported!\")\n if Minv is not None:\n raise NotImplementedError(\"Minv is not currently supported!\")\n if OPinv is not None:\n raise NotImplementedError(\"OPinv is not currently supported!\")\n inp_data = FrovedisFeatureData(A, dense_kind='rowmajor')\n X = inp_data.get()\n x_dtype = inp_data.get_dtype()\n x_itype = inp_data.get_itype()\n dense = inp_data.is_dense()\n nrows = 
inp_data.numRows()\n ncols = inp_data.numCols()\n\n if nrows != ncols:\n raise ValueError('expected squared symmetric matrix (shape=%s)' % (inp_data.shape,))\n if k <= 0:\n raise ValueError('k must be greater than 0.')\n if k >= nrows:\n raise ValueError('k must be less than or equal to N for N * N square matrix.')\n if sigma is not None and not dense:\n raise ValueError('currently sigma is only supported for dense matrices.')\n if sigma is None:\n sigma = np.finfo(np.float32).max\n\n if which not in ['LM', 'SM', 'LA', 'SA', 'BE']:\n raise ValueError('which must be one of LM, SM, LA, SA, or BE')\n if mode in ['buckling', 'cayley']:\n raise ValueError('currenly normal mode is only supported!')\n if maxiter is None:\n maxiter = 10 * nrows\n wantEv = return_eigenvectors\n (host, port) = FrovedisServer.getServerInstance()\n res = rpclib.compute_eigsh(host, port, X.get(),\n k, which.encode('ascii'),\n sigma, maxiter, wantEv,\n tol, x_dtype,\n x_itype, dense)\n excpt = rpclib.check_server_exception()\n if excpt[\"status\"]:\n raise RuntimeError(excpt[\"info\"])\n sptr = res[\"eigenval\"]\n uptr = res[\"eigenvec\"]\n m_m = res['m']\n k_k = res['k']\n eigval = FrovedisVector({'dptr' : sptr, 'size' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n if wantEv:\n eigvec = FrovedisDenseMatrix('C', {'dptr' : uptr, 'nrow' : m_m, 'ncol' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n return eigval, eigvec\n else:\n return eigval", "def compute_eigvals(phi): # pylint: disable=arguments-differ\n if qml.math.get_interface(phi) == \"tensorflow\":\n phi = qml.math.cast_like(phi, 1j)\n\n signs = np.array([1, -1, 0, 0])\n if qml.math.ndim(phi) == 0:\n return qml.math.exp(0.5j * phi * signs)\n\n return qml.math.exp(qml.math.tensordot(0.5j * phi, signs, axes=0))", "def eigensystem(mat):\n e, v = numpy.linalg.eig(mat)\n\n # `eig` returns complex results but we know all of the\n # eigenstates have real energy.\n e = numpy.real(e)\n\n items = zip(e, v.T)\n items = sorted(items, key = operator.itemgetter(0))\n e, v = zip(*items)\n\n return (e, v)", "def test_inverse_eigenvectors_non_interacting(self, size):\n t_nn = 1.2\n idx = np.arange(size)\n g0_inv_full = np.zeros((size, size), dtype=complex)\n g0_inv_full[idx[:-1], idx[1:]] = g0_inv_full[idx[1:], idx[:-1]] = t_nn\n for g0 in self.g0_loc_inv:\n g0_inv_full[idx, idx] = g0\n rv, h, rv_inv = gt.matrix.decompose_gf(g0_inv_full)\n assert_allclose(rv.dot(rv_inv), np.identity(*h.shape), atol=1e-14)", "def eigenvalue_decomposition (a_t_a_matrix ):\r\n # get eigenvalues and -vectors from ATA matrix\r\n eigenvalues = np.zeros (a_t_a_matrix.shape[0] )\r\n eigenvectors = np.zeros ((a_t_a_matrix.shape[0], a_t_a_matrix.shape[0] ))\r\n evals, evecs = np.linalg.eig (a_t_a_matrix )\r\n\r\n # sort them\r\n indices = np.argsort (-evals ) # reverse sort: greatest numbers first\r\n for loop_count, index in enumerate(indices ):\r\n eigenvalues[loop_count] = evals[index]\r\n eigenvectors[:, loop_count] = evecs[:, index]\r\n\r\n # get the normal vector, normalize it and if it's turned to the ground, turn it around\r\n normal_vector = normalize_vector (eigenvectors[:, -1] ) # the last (smallest) vector is the normal vector\r\n if (normal_vector[2] < 0):\r\n normal_vector = normal_vector * -1\r\n\r\n return normal_vector, eigenvalues[-1]", "def eig(C):\r\n\r\n # class eig(object):\r\n # def __call__(self, C):\r\n\r\n # Householder transformation of a symmetric matrix V into tridiagonal form.\r\n # -> n : dimension\r\n # -> V : symmetric 
nxn-matrix\r\n # <- V : orthogonal transformation matrix:\r\n # tridiag matrix == V * V_in * V^t\r\n # <- d : diagonal\r\n # <- e[0..n-1] : off diagonal (elements 1..n-1)\r\n\r\n # Symmetric tridiagonal QL algorithm, iterative\r\n # Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations\r\n # -> n : Dimension.\r\n # -> d : Diagonale of tridiagonal matrix.\r\n # -> e[1..n-1] : off-diagonal, output from Householder\r\n # -> V : matrix output von Householder\r\n # <- d : eigenvalues\r\n # <- e : garbage?\r\n # <- V : basis of eigenvectors, according to d\r\n\r\n\r\n # tred2(N, B, diagD, offdiag); B=C on input\r\n # tql2(N, diagD, offdiag, B);\r\n\r\n # private void tred2 (int n, double V[][], double d[], double e[]) {\r\n def tred2 (n, V, d, e):\r\n # This is derived from the Algol procedures tred2 by\r\n # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for\r\n # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding\r\n # Fortran subroutine in EISPACK.\r\n\r\n num_opt = False # factor 1.5 in 30-D\r\n\r\n for j in range(n):\r\n d[j] = V[n-1][j] # d is output argument\r\n\r\n # Householder reduction to tridiagonal form.\r\n\r\n for i in range(n-1,0,-1):\r\n # Scale to avoid under/overflow.\r\n h = 0.0\r\n if not num_opt:\r\n scale = 0.0\r\n for k in range(i):\r\n scale = scale + abs(d[k])\r\n else:\r\n scale = sum(abs(d[0:i]))\r\n\r\n if scale == 0.0:\r\n e[i] = d[i-1]\r\n for j in range(i):\r\n d[j] = V[i-1][j]\r\n V[i][j] = 0.0\r\n V[j][i] = 0.0\r\n else:\r\n\r\n # Generate Householder vector.\r\n if not num_opt:\r\n for k in range(i):\r\n d[k] /= scale\r\n h += d[k] * d[k]\r\n else:\r\n d[:i] /= scale\r\n h = np.dot(d[:i],d[:i])\r\n\r\n f = d[i-1]\r\n g = h**0.5\r\n\r\n if f > 0:\r\n g = -g\r\n\r\n e[i] = scale * g\r\n h = h - f * g\r\n d[i-1] = f - g\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] = 0.0\r\n else:\r\n e[:i] = 0.0\r\n\r\n # Apply similarity transformation to remaining columns.\r\n\r\n for j in range(i):\r\n f = d[j]\r\n V[j][i] = f\r\n g = e[j] + V[j][j] * f\r\n if not num_opt:\r\n for k in range(j+1, i):\r\n g += V[k][j] * d[k]\r\n e[k] += V[k][j] * f\r\n e[j] = g\r\n else:\r\n e[j+1:i] += V.T[j][j+1:i] * f\r\n e[j] = g + np.dot(V.T[j][j+1:i],d[j+1:i])\r\n\r\n f = 0.0\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] /= h\r\n f += e[j] * d[j]\r\n else:\r\n e[:i] /= h\r\n f += np.dot(e[:i],d[:i])\r\n\r\n hh = f / (h + h)\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] -= hh * d[j]\r\n else:\r\n e[:i] -= hh * d[:i]\r\n\r\n for j in range(i):\r\n f = d[j]\r\n g = e[j]\r\n if not num_opt:\r\n for k in range(j, i):\r\n V[k][j] -= (f * e[k] + g * d[k])\r\n else:\r\n V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])\r\n\r\n d[j] = V[i-1][j]\r\n V[i][j] = 0.0\r\n\r\n d[i] = h\r\n # end for i--\r\n\r\n # Accumulate transformations.\r\n\r\n for i in range(n-1):\r\n V[n-1][i] = V[i][i]\r\n V[i][i] = 1.0\r\n h = d[i+1]\r\n if h != 0.0:\r\n if not num_opt:\r\n for k in range(i+1):\r\n d[k] = V[k][i+1] / h\r\n else:\r\n d[:i+1] = V.T[i+1][:i+1] / h\r\n\r\n for j in range(i+1):\r\n if not num_opt:\r\n g = 0.0\r\n for k in range(i+1):\r\n g += V[k][i+1] * V[k][j]\r\n for k in range(i+1):\r\n V[k][j] -= g * d[k]\r\n else:\r\n g = np.dot(V.T[i+1][0:i+1], V.T[j][0:i+1])\r\n V.T[j][:i+1] -= g * d[:i+1]\r\n\r\n if not num_opt:\r\n for k in range(i+1):\r\n V[k][i+1] = 0.0\r\n else:\r\n V.T[i+1][:i+1] = 0.0\r\n\r\n\r\n if not num_opt:\r\n for j in range(n):\r\n d[j] = V[n-1][j]\r\n V[n-1][j] = 0.0\r\n else:\r\n d[:n] = V[n-1][:n]\r\n V[n-1][:n] = 0.0\r\n\r\n 
V[n-1][n-1] = 1.0\r\n e[0] = 0.0\r\n\r\n\r\n # Symmetric tridiagonal QL algorithm, taken from JAMA package.\r\n # private void tql2 (int n, double d[], double e[], double V[][]) {\r\n # needs roughly 3N^3 operations\r\n def tql2 (n, d, e, V):\r\n\r\n # This is derived from the Algol procedures tql2, by\r\n # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for\r\n # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding\r\n # Fortran subroutine in EISPACK.\r\n\r\n num_opt = False # using vectors from numpy makes it faster\r\n\r\n if not num_opt:\r\n for i in range(1,n): # (int i = 1; i < n; i++):\r\n e[i-1] = e[i]\r\n else:\r\n e[0:n-1] = e[1:n]\r\n e[n-1] = 0.0\r\n\r\n f = 0.0\r\n tst1 = 0.0\r\n eps = 2.0**-52.0\r\n for l in range(n): # (int l = 0; l < n; l++) {\r\n\r\n # Find small subdiagonal element\r\n\r\n tst1 = max(tst1, abs(d[l]) + abs(e[l]))\r\n m = l\r\n while m < n:\r\n if abs(e[m]) <= eps*tst1:\r\n break\r\n m += 1\r\n\r\n # If m == l, d[l] is an eigenvalue,\r\n # otherwise, iterate.\r\n\r\n if m > l:\r\n iiter = 0\r\n while 1: # do {\r\n iiter += 1 # (Could check iteration count here.)\r\n\r\n # Compute implicit shift\r\n\r\n g = d[l]\r\n p = (d[l+1] - g) / (2.0 * e[l])\r\n r = (p**2 + 1)**0.5 # hypot(p,1.0)\r\n if p < 0:\r\n r = -r\r\n\r\n d[l] = e[l] / (p + r)\r\n d[l+1] = e[l] * (p + r)\r\n dl1 = d[l+1]\r\n h = g - d[l]\r\n if not num_opt:\r\n for i in range(l+2, n):\r\n d[i] -= h\r\n else:\r\n d[l+2:n] -= h\r\n\r\n f = f + h\r\n\r\n # Implicit QL transformation.\r\n\r\n p = d[m]\r\n c = 1.0\r\n c2 = c\r\n c3 = c\r\n el1 = e[l+1]\r\n s = 0.0\r\n s2 = 0.0\r\n\r\n # hh = V.T[0].copy() # only with num_opt\r\n for i in range(m-1, l-1, -1): # (int i = m-1; i >= l; i--) {\r\n c3 = c2\r\n c2 = c\r\n s2 = s\r\n g = c * e[i]\r\n h = c * p\r\n r = (p**2 + e[i]**2)**0.5 # hypot(p,e[i])\r\n e[i+1] = s * r\r\n s = e[i] / r\r\n c = p / r\r\n p = c * d[i] - s * g\r\n d[i+1] = h + s * (c * g + s * d[i])\r\n\r\n # Accumulate transformation.\r\n\r\n if not num_opt: # overall factor 3 in 30-D\r\n for k in range(n): # (int k = 0; k < n; k++) {\r\n h = V[k][i+1]\r\n V[k][i+1] = s * V[k][i] + c * h\r\n V[k][i] = c * V[k][i] - s * h\r\n else: # about 20% faster in 10-D\r\n hh = V.T[i+1].copy()\r\n # hh[:] = V.T[i+1][:]\r\n V.T[i+1] = s * V.T[i] + c * hh\r\n V.T[i] = c * V.T[i] - s * hh\r\n # V.T[i] *= c\r\n # V.T[i] -= s * hh\r\n\r\n p = -s * s2 * c3 * el1 * e[l] / dl1\r\n e[l] = s * p\r\n d[l] = c * p\r\n\r\n # Check for convergence.\r\n if abs(e[l]) <= eps*tst1:\r\n break\r\n # } while (Math.abs(e[l]) > eps*tst1);\r\n\r\n d[l] = d[l] + f\r\n e[l] = 0.0\r\n\r\n\r\n # Sort eigenvalues and corresponding vectors.\r\n if 11 < 3:\r\n for i in range(n-1): # (int i = 0; i < n-1; i++) {\r\n k = i\r\n p = d[i]\r\n for j in range(i+1, n): # (int j = i+1; j < n; j++) {\r\n if d[j] < p: # NH find smallest k>i\r\n k = j\r\n p = d[j]\r\n\r\n if k != i:\r\n d[k] = d[i] # swap k and i\r\n d[i] = p\r\n for j in range(n): # (int j = 0; j < n; j++) {\r\n p = V[j][i]\r\n V[j][i] = V[j][k]\r\n V[j][k] = p\r\n # tql2\r\n\r\n N = len(C[0])\r\n if 11 < 3:\r\n V = np.array([x[:] for x in C]) # copy each \"row\"\r\n N = V[0].size\r\n d = np.zeros(N)\r\n e = np.zeros(N)\r\n else:\r\n V = [[x[i] for i in xrange(N)] for x in C] # copy each \"row\"\r\n d = N * [0.]\r\n e = N * [0.]\r\n\r\n tred2(N, V, d, e)\r\n tql2(N, d, e, V)\r\n return (array(d), array(V))", "def eigenalgo(self, accuracy: float = 0, cap: int = 50000, version: str = \"Givens\", not_skip: bool = True):\n j, temps, verify_accuracy = 0, 0, 
np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n if version == \"Gram-Schmidt\":\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.gram_schmidt_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Givens\":\n verify_accuracy = np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.givens_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Rayleigh\":\n not_sing, diff, cond, j = True, accuracy + 1, True, 0\n temps = time()\n while cond: # Stop condition, all eigenvalues must be different\n while diff > accuracy and j < cap and not_sing:\n j += 1\n self.rvap, self.vep, diff, not_sing = self.rayleigh_iteration(self.rvap, self.vep)\n\n cond = False\n if j < cap:\n self.calc, first, not_sing = np.zeros(self.N, dtype=bool), True, True\n for i in range(self.N):\n if np.sum(np.less(np.abs(self.rvap - self.rvap[i]), 10 ** -6)) != 1:\n self.rvap[i + 1:] += self.memorize[i]\n if first:\n self.memorize[i] += 0.5\n self.vep[i + 1:, i + 1:] = np.eye(self.N - i - 1)\n first, cond, diff = False, True, accuracy + 1\n self.calc[i + 1:] = 1\n temps = time() - temps\n return self.rvap, self.vep, diff, j, temps\n\n else:\n print(\"Please select an appropriate value for the version parameter\")\n\n temps = time() - temps\n diff = np.max(abs(self.vap[verify_accuracy]))\n return np.diag(self.vap), self.vep, diff, j, temps", "def compute_eigvals(phi): # pylint: disable=arguments-differ\n if qml.math.get_interface(phi) == \"tensorflow\":\n phase = qml.math.exp(-0.5j * qml.math.cast_like(phi, 1j))\n return stack_last([phase, qml.math.conj(phase), qml.math.conj(phase), phase])\n\n prefactors = qml.math.array([-0.5j, 0.5j, 0.5j, -0.5j], like=phi)\n if qml.math.ndim(phi) == 0:\n product = phi * prefactors\n else:\n product = qml.math.outer(phi, prefactors)\n return qml.math.exp(product)", "def solve_elas(self,x,E_p=None):\n \n if x['Crystal_Structure'] == \"Cubic\":\n self.estf = self.Ccubic( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2] )\n\n elif x['Crystal_Structure'] == \"HCP\":\n self.estf = self.Chcp( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2], x['Stiffness'][3], x['Stiffness'][4] )\n\n # Update orientation\n for n in range(9):\n cell_num_list = list((9*self.cell_num)+n)\n self.orient.vector()[cell_num_list] = self.rots[self.subdomain_num,n]\n \n self.a = inner(self.sigs3x3(self.u), sym(grad(self.v)))*dx\n \n if E_p:\n # Note use of sym(), assuming E_p to be the \\chi field\n L_elas_rhs = self.L_elas + inner(self.sigs_e(sym(E_p)), sym(grad(self.v)))*dx\n else:\n L_elas_rhs = self.L_elas \n\n self.A_elas, self.b_elas = assemble_system(self.a, L_elas_rhs, self.bc_elas) \n \n # Attach near nullspace to matrix\n as_backend_type(self.A_elas).set_near_nullspace(self.null_space)\n\n # Set matrix operator\n self.elasticity_solver.set_operator(self.A_elas);\n\n # Compute solution\n self.elasticity_solver.solve(self.ue.vector(), self.b_elas);\n \n if E_p:\n self.Ue_sym = project( sym(grad(self.ue) - E_p), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n else:\n self.Ue_sym = project( sym(grad(self.ue)), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n \n self.sim_strn = np.reshape(self.Ue_sym.vector().get_local(),(len(self.grains.array()),9))\n\n for grain_no in range(self.grains.array().max()):\n # Grain numbering is 1 index origin\n 
cell_subset = self.grains.array()==(grain_no+1)\n if np.any(cell_subset):\n self.sim_avg[grain_no,:] = np.average(self.sim_strn[cell_subset,:],\n axis=0,weights=self.dVol[cell_subset]) \n \n deps = self.exp_strn - self.sim_avg\n resid = np.linalg.norm(deps.ravel())\n print(resid) #,self.its)\n return resid", "def compute_eigvals(phi): # pylint: disable=arguments-differ\n if qml.math.get_interface(phi) == \"tensorflow\":\n phi = qml.math.cast_like(phi, 1j)\n\n return qml.math.stack([1, 1, -qml.math.exp(1j * phi), qml.math.exp(1j * phi)])", "def eigen_operator(shape, e, v, **kargs):\n def matvec(x):\n k = [np.dot(x.T, vi) for vi in v]\n return np.sum([ki * ei * vi for ki, ei, vi in zip(k, e, v)], axis=0)\n return LinearOperator(shape, matvec=matvec, rmatvec=matvec, **kargs)", "def eigenvals(self, evals_count=6):\n hamiltonian_mat = self.hamiltonian()\n return hamiltonian_mat.eigenenergies(eigvals=evals_count)", "def hessian_matrix_eigvals(H_elems):\n return _symmetric_compute_eigenvalues(H_elems)", "def solve_gevp_gen(a, t_0, algorithm, sort_by_vectors=15, **kwargs):\n B = np.matrix(a[t_0])\n try:\n f = algorithm(B=B, **kwargs)\n except TypeError:\n # If the function doesn't do currying, implement that here\n f = lambda A: algorithm(B=B, A=A)\n except LinAlgError:\n return\n\n eigenvectors = None\n count = 0\n\n for j in range(t_0 + 1, 32):\n try:\n eigenvalues, new_eigenvectors = f(np.matrix(a[j]))\n \n if eigenvectors is None:\n eigenvectors = np.zeros_like(new_eigenvectors)\n\n if j < sort_by_vectors:\n # TODO Sortieren nach Eigenwert\n perm = permutation_indices(eigenvalues)\n else:\n perm = reorder_by_ev(new_eigenvectors, eigenvectors, B)\n\n eigenvectors = new_eigenvectors[:,perm]\n eigenvalues = eigenvalues[:,perm]\n \n count += 1\n\n yield eigenvalues, eigenvectors\n\n except (LinAlgError, TypeError) as e:\n #import traceback\n #traceback.print_exc()\n return", "def put_eigvecs(self, dest):\n if parallel.is_rank_zero():\n self.put_array(self.eigvecs, dest)\n parallel.barrier()", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def set_eigenvalue_problem(self, *args, ncc_cutoff=1e-10, tolerance=1e-10, **kwargs):\n # should be set EVP for consistency with set IVP. Why do we have P_problem. 
Why not IVP, EVP.\n self.problem_type = 'EVP'\n self.problem = de.EVP(self.domain, variables=self.variables, eigenvalue='omega', ncc_cutoff=ncc_cutoff, tolerance=tolerance)\n self.problem.substitutions['dt(f)'] = \"omega*f\"\n self.set_equations(*args, **kwargs)", "def compute_eigvals(theta, num_wires): # pylint: disable=arguments-differ\n eigs = qml.math.convert_like(pauli_eigs(num_wires), theta)\n\n if qml.math.get_interface(theta) == \"tensorflow\":\n theta = qml.math.cast_like(theta, 1j)\n eigs = qml.math.cast_like(eigs, 1j)\n\n if qml.math.ndim(theta) == 0:\n return qml.math.exp(-0.5j * theta * eigs)\n\n return qml.math.exp(qml.math.outer(-0.5j * theta, eigs))", "def plot_eigenvectors(*, obs_names, eigenvecs, fig_size=(8, 6), font_size=12,\n label_size=16, save_fig=False, write_path=None):\n # Loop over directions and plot each eigenvector on a separate figure\n for direction in range(eigenvecs.shape[1]):\n plt.figure(figsize=fig_size)\n plt.plot(np.abs(eigenvecs[::3, direction]), 'bx',\n np.abs(eigenvecs[1::3, direction]), 'rx',\n np.abs(eigenvecs[2::3, direction]), 'cx',\n markersize=10, mew=3)\n plt.ylim(0, 1)\n plt.grid()\n plt.yticks(fontsize=font_size)\n plt.xticks(range(len(obs_names)), obs_names, fontsize=font_size)\n plt.xticks(rotation=60)\n plt.ylabel(r'$\\mathbf{{v}}_{%03d}$' % (direction), fontsize=label_size)\n plt.legend(['$r$ direction', r'$\\theta$ direction',\n '$\\phi$ direction'], loc='upper right', frameon=False,\n fontsize=label_size)\n plt.xlabel('Location', fontsize=label_size)\n\n if save_fig is True:\n # Create the output directory if it does not exist\n if not os.path.exists(write_path):\n os.makedirs(write_path)\n fpath = os.path.join(write_path,\n 'eigendirection%03d.png' % direction)\n plt.savefig(fpath, bbox_inches='tight')\n plt.close()", "def eig(x1):\n\n is_x1_dparray = isinstance(x1, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray):\n if (x1.size > 0):\n return dpnp_eig(x1)\n\n return call_origin(numpy.linalg.eig, x1)", "def compute_mesh_eigenfunctions(self, mesh, star0, star1, bdry=False):\n nb = len(mesh)\n\n inputs = []\n for m, s0, s1 in zip(mesh, star0, star1):\n d = m['int_d01']\n if bdry:\n d = scipy.sparse.vstack([d, m['bdry_d01']])\n inputs.extend([s0, s1, d])\n\n eigenvalues, eigenvectors = [], []\n outputs = self.hodgefunc(nb, self.num_eigenvectors,\n self.num_extra_eigenvectors, *inputs)\n for i in range(nb):\n eigenvalues.append(outputs[2*i])\n eigenvectors.append(outputs[2*i+1])\n\n return eigenvalues, eigenvectors", "def solve_eigenproblem(self, A, M=None, num=None, tol=None):\n def eigenproblem_is_hermitian():\n return is_hermitian(A) and (M == None or is_hermitian(M))\n\n if self.is_hermitian() and not eigenproblem_is_hermitian():\n raise ValueError(\"Eigenproblem matrices are non-Hermitian but solver \"\n \"assumes Hermitian matrices. Aborting.\")\n logger.info(\"Solving eigenproblem. This may take a while...\")\n df.tic()\n omegas, ws = self._solve_eigenproblem(A, M=M, num=num, tol=tol)\n logger.info(\"Computing the eigenvalues and eigenvectors \"\n \"took {}\".format(format_time(df.toc())))\n\n # XXX TODO: Remove this conversion to numpy.arrays once we\n # have better support for different kinds of\n # matrices (but the conversion would happen in\n # compute_relative_error() anyway, so by doing it\n # here we avoid doing it multiple times.\n if not isinstance(A, np.ndarray):\n logger.warning(\n \"Converting sparse matrix A to dense array to check whether it is \"\n \"Hermitian. 
This might consume a lot of memory if A is big!.\")\n A = as_dense_array(A)\n if not isinstance(M, (np.ndarray, NoneType)):\n logger.warning(\n \"Converting sparse matrix M to dense array to check whether it is \"\n \"Hermitian. This might consume a lot of memory if M is big!.\")\n M = as_dense_array(M)\n\n rel_errors = np.array(\n [compute_relative_error(A, M, omega, w) for omega, w in zip(omegas, ws)])\n return omegas, ws, rel_errors", "def matrix_eig(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n hermitian=False,\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n evenTrunc = False,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n # If chis is not specfied, there is no even truncation scheme; else, we\n # keep track of the chi we specfied\n if chis is None:\n evenTrunc = False\n else:\n try:\n chis = list(chis)\n except TypeError:\n chis = [chis]\n chiSpec = max(chis)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n assert self.charge == 0\n assert self.dirs[0] + self.dirs[1] == 0\n assert set(zip(self.qhape[0], self.shape[0])) == set(\n zip(self.qhape[1], self.shape[1])\n )\n\n S_dtype = np.float_ if hermitian else np.complex_\n U_dtype = self.dtype if hermitian else np.complex_\n\n # Eigenvalue decompose each sector at a time.\n # While doing so, also keep track of a list of all eigenvalues, as well\n # as a heap that gives the negative of the absolute value of the\n # largest eigenvalue in each sector. These will be needed later when\n # deciding how to truncate the eigenvalues.\n eigdecomps = {}\n dims = {}\n minusabs_next_eigs = []\n all_eigs = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n # This matrix is empty and trivial.\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=U_dtype)\n s = np.empty((m,), dtype=S_dtype)\n eigdecomp = (s, u)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n if hermitian:\n s, u = spsla.eighs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n s, u = spsla.eigs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n if hermitian:\n s, u = np.linalg.eigh(v)\n else:\n s, u = np.linalg.eig(v)\n order = np.argsort(-np.abs(s))\n s = s[order]\n u = u[:, order]\n s = s.astype(S_dtype)\n u = u.astype(U_dtype)\n eigdecomp = (s, u)\n eigdecomps[k] = eigdecomp\n dims[k] = 0\n all_eigs.append(s)\n if 0 not in s.shape:\n heapq.heappush(minusabs_next_eigs, (-np.abs(s[0]), k))\n try:\n all_eigs = np.concatenate(all_eigs)\n except ValueError:\n all_eigs = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_eigs,\n eigdecomps,\n minusabs_next_eigs,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # truncate in both sectors evenly\n if evenTrunc and chiSpec == chi:\n # This piece of codes is only designed\n # with Z2 symmetry tensor in mind\n errmeg = \"The matrix should have two sectors (0,0) and (1,1).\"\n assert len(dims) == 2, errmeg\n if chiSpec % 2 == 
0:\n dims[(0, 0)] = int(chiSpec / 2)\n dims[(1, 1)] = int(chiSpec / 2)\n else:\n dims[(0, 0)] = int((chiSpec + 1) / 2)\n dims[(1, 1)] = int((chiSpec - 1) / 2)\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n eigdecomps = {k: v for k, v in eigdecomps.items() if dims[k] > 0}\n for k, v in eigdecomps.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n eigdecomps[k] = (v[0][:d], v[1][:, :d])\n else:\n del eigdecomps[k]\n\n # Initialize S and U.\n d = self.dirs[0]\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=S_dtype,\n invar=False,\n charge=0,\n )\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n dtype=U_dtype,\n charge=0,\n )\n\n # Set the blocks of U, S and V.\n for k, v in eigdecomps.items():\n S[(k[0],)] = v[0]\n k_U = (k[0], k[0])\n U[k_U] = v[1]\n\n return S, U, rel_err", "def run_vqe(\n self,\n backend=Aer.get_backend(\"statevector_simulator\"),\n var_form=None,\n optimizer=None,\n reps=None,\n mode=\"min_val\",\n ):\n # N=int(np.ceil(np.log2(len(self.mat))))\n # hk = np.zeros((2**N,2**N),dtype='complex')\n # hk[:self.mat.shape[0], :self.mat.shape[1]] = self.mat\n N = self.n_qubits()\n if mode == \"max_val\":\n Hamil_mat = aqua.operators.MatrixOperator(-1 * self.mat)\n # Hamil_mat = MatrixOperator(-1 * self.mat)\n else:\n Hamil_mat = aqua.operators.MatrixOperator(self.mat)\n # Hamil_mat = MatrixOperator(self.mat)\n Hamil_qop = aqua.operators.op_converter.to_weighted_pauli_operator(\n Hamil_mat\n )\n if var_form is None:\n if reps is None:\n reps = 2\n # reps=5\n from qiskit.circuit.library import EfficientSU2\n\n var_form = EfficientSU2(N, reps=reps)\n if optimizer is None:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form)\n # vqe = VQE(Hamil_qop, var_form)\n else:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form, optimizer)\n # vqe = VQE(Hamil_qop, var_form, optimizer)\n vqe_result = vqe.run(backend)\n en = np.real(vqe_result[\"eigenvalue\"])\n # params=vqe.optimal_params\n # circuit=vqe.construct_circuit(params)\n if mode == \"max_val\":\n en = -1 * en\n # states = np.sort(\n # np.real(\n # vqe.expectation.convert(\n # StateFn(vqe.operator, is_measurement=True)\n # ).to_matrix()\n # )\n # )\n return en, vqe_result, vqe", "def reflect_eigenvectors(x):\n # random reference vector\n xnew = x.copy()\n for v in range(x.shape[-1]):\n cum_sum = x[0, :, v]\n cum_sum /= np.linalg.norm(cum_sum)\n for i in np.arange(1, x.shape[0]): \n if np.any(np.isnan(x[i, :, v])):\n xnew[i, :, v] = x[i, :, v]\n else:\n cos = cum_sum.dot(x[i, :, v])\n if cos > 0:\n cum_sum += x[i, :, v]\n cum_sum /= np.linalg.norm(cum_sum)\n\n else:\n cum_sum += np.negative(x[i, :, v])\n cum_sum /= np.linalg.norm(cum_sum)\n xnew[i, :, v] = np.negative(x[i, :, v])\n \n return xnew", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return w, v", "def eig(A):\n lambdas, vs = linalg.eig(A)\n lvs = [(lambdas[i], vs[i]) for i in range(len(vs))]\n\n def lv_cmp(lv1, lv2):\n lambda1 = lv1[0]\n lambda2 = lv2[0]\n\n r1 = numpy.real(lambda1)\n i1 = numpy.real(lambda1)\n r2 = numpy.real(lambda2)\n i2 = numpy.real(lambda2)\n\n if r1 > r2:\n return 1\n elif r1 < r2:\n return -1\n elif i1 > i2:\n return 1\n elif i1 < i2:\n return -1\n return 0\n\n lvs.sort(cmp = lv_cmp)\n\n lambdas = [lv[0] for lv in lvs]\n vs = [lv[1] for lv in lvs ]\n return lambdas, vs", "def left_eigenvectors(matrix, nvals=None):\n 
matrix = np.asarray(matrix)\n return _eigenvectors(matrix.transpose(), nvals)", "def run_numpy(self):\n return np.linalg.eigh(self.mat)", "def geneiv(A, B):\n Li = np.linalg.inv(linalg.cholesky(B).T)\n C = Li*A*(Li.T)\n C = np.asmatrix((C + C.T)*0.5, np.float32)\n\n eivs, V = np.linalg.eig(C)\n return eivs, Li.T*V", "def eigen_vector_i(self, i):\n return self._eig_vec[:,i]", "def pde_eigv(self, u):\n u0, u1, u2 = u.T\n c = np.sqrt(9.81*u0)\n vel = np.sqrt((u1/u0)**2 + (u2/u0)**2)\n return np.array([vel-c, vel, vel+c])", "def put_eigvals(self, dest):\n if parallel.is_rank_zero():\n self.put_array(self.eigvals, dest)\n parallel.barrier()", "def get_decomp(self, eigvals_src, eigvecs_src):\n self.eigvals = np.squeeze(np.array(parallel.call_and_bcast(\n self.get_array, eigvals_src)))\n self.eigvecs = parallel.call_and_bcast(self.get_array, eigvecs_src)", "def test_eigenvalues_in_cylinders(self):\n random.seed(102938482634)\n point_cloud = load(os.path.join('testdata', 'AHN3.las'))\n num_all_pc_points = len(point_cloud[keys.point][\"x\"][\"data\"])\n rand_indices = [random.randint(0, num_all_pc_points) for _ in range(20)]\n target_point_cloud = utils.copy_point_cloud(point_cloud, rand_indices)\n radius = 2.5\n neighbors = compute_neighbors.compute_cylinder_neighborhood(point_cloud, target_point_cloud, radius)\n\n compute_features(point_cloud, neighbors, target_point_cloud,\n [\"eigenv_1\", \"eigenv_2\", \"eigenv_3\"], InfiniteCylinder(5))\n\n self.assertEqual(\"laserchicken.feature_extractor.eigenvals_feature_extractor\",\n target_point_cloud[keys.provenance][-1][\"module\"])", "def eig(self, q):\n q1, q2 = q.T\n c = np.sqrt(9.81*q1)\n lambda1 = q2/q1 - c\n lambda2 = q2/q1 + c\n return np.array([lambda1, lambda2])", "def compute_residualsNEW(eigenvectors, eps_scale=3, progressBar=True, skipFirst=True, bandwidth_type='median'):\n #from multiprocessing import Pool\n # Check for more than two eigenvectors.\n assert eigenvectors.shape[1] > 2, 'There must be more than two eigenvectors to compute residuals.'\n\n # Set up the residuals and define the second as one.\n residual = np.zeros(eigenvectors.shape[1])\n if skipFirst:\n residual[1] = 1\n firstIndex = 2\n else:\n residual[0] = 1\n firstIndex = 1\n\n ivals = range(firstIndex, eigenvectors.shape[1])\n jvals = np.arange(eigenvectors.shape[0])\n \n # Run the residual calculation for each eigenvector. 
\n for i in ivals:\n num, den = 0, 0\n\n if bandwidth_type == 'median':\n bandwidth = np.power((np.median(scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(eigenvectors[:,:i]))) / eps_scale), 2)\n elif bandwidth_type == 'mean':\n bandwidth = np.power((np.mean(scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(eigenvectors[:,:i]))) / eps_scale), 2)\n else:\n raise ValueError('bandwidth_type must be either median or mean')\n\n if bandwidth == 0:\n bandwidth = np.finfo(float).eps\n \n regression = partial(local_linear_regression, eigenvectors[:, :i], eigenvectors[:, i], bandwidth=bandwidth, bandwidth_type=bandwidth_type)\n\n fit = np.array(list(map(regression, jvals)))[jvals, jvals]\n num = np.sum(np.power(eigenvectors[:, i] - fit, 2))\n den = np.sum(np.power(eigenvectors[:, i], 2))\n \n # Compute a residual for eigenvector i.\n residual[i] = np.sqrt(num / den)\n \n result = dict([('Residuals', residual), ('Eps Scale', eps_scale), ('Bandwidth Type', bandwidth_type)])\n \n return result", "def test_solvers():\n # With P1 elements we have an error E-15 with Krylov solver\n # tolerances of 1E-12, but with P2 elements the error is E-6.\n # P3 elements drive the tolerance down to E-3.\n # For higher mesh resolution we also need reduced tolerances.\n # The tol dict maps degree to expected tolerance for the coarse\n # meshes in the test.\n tol = {'direct': {1: 1E-11, 2: 1E-11, 3: 1E-11},\n 'Krylov': {1: 1E-14, 2: 1E-05, 3: 1E-03}}\n u_D = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')\n kappa = Expression('x[0] + x[1]')\n f = Expression('-8*x[0] - 10*x[1]')\n for Nx, Ny in [(3,3), (3,5), (5,3)]:\n for degree in 1, 2, 3:\n for linear_solver in 'direct', 'Krylov':\n for solver_func in solver, solver_objects:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n # Important: Krylov solver error must be smaller\n # than tol!\n u = solver_func(\n kappa, f, u_D, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol[linear_solver][degree],\n rel_tol=0.1*tol[linear_solver][degree])\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_D_Function = interpolate(u_D, V) # exact solution\n # Check that dof arrays are equal\n u_D_array = u_D_Function.vector().array() # dof values\n max_error = (u_D_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol[linear_solver][degree], msg", "def power_iteration_two_components(X):\n evector1, evalue1=power_iteration(X)\n X=X-np.outer(evector1.dot(X.T), evector1) #project each point(aka each row of X) on the first euginevector and then substract that from each row\n evector2, evalue2=power_iteration(X)\n\n eigenvectors=np.vstack([evector1,evector2])\n eigenvalues=np.array([evalue1,evalue2])\n\n return eigenvectors, eigenvalues", "def heavy_fixCM_eigvals(NP, b, c, params):\n l = params['l']\n k = params['k']\n I3 = params['I3']\n # Here, omega_3 is just the MAGNITUDE, not signed\n w3 = np.abs(params['w3'][0])\n gn = params['Mm'] * params['g']\n\n # Check output if small system\n print 'gn = ', gn\n print 'b = ', b\n print 'c = ', c\n\n if NP == 1:\n pass\n elif NP == 2:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k 
/ (I3 * w3), 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n -1j * l * gn / (I3 * w3),\n l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3),\n -l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3)\n ])\n print 'exact_eigvals are =', eigvals\n return eigvals\n elif NP == 3:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0., 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0., 0., 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn - 2. * (-1) ** (b) * l ** 2 * k) / (I3 * w3), 0., \\\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [0., 0., (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n # -1j*l*gn/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - 3. * l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j-3.*l*k*(-1)**(b) - gn)/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j - l*k*(-1)**(b) - gn)/(I3*w3)\n ])\n return eigvals\n else:\n return np.array([])", "def build(self, data: np.ndarray):\n ret = data.dot(self.eigenvectors)\n self.pca_predictor_vars = ret\n return ret", "def check(mat, otp):\n prd = mat*otp\n eigval = prd[0]/otp[0]\n print 'computed eigenvalue :' , eigval\n [eigs, vecs] = np.linalg.eig(mat)\n abseigs = list(abs(eigs))\n ind = abseigs.index(max(abseigs))\n print ' largest eigenvalue :', eigs[ind]", "def get_eig(self, tolerance=None):\r\n E, V = scipy.linalg.eig(self.data)\r\n E = pd.Series(E.real, name=\"EIG\")\r\n V = pd.DataFrame(V.real)\r\n if tolerance is not None:\r\n E[E/E.max() < tolerance] = 0\r\n return E, V", "def load_eig(self, **kwargs):\r\n eig = kwargs['eig']\r\n\r\n self.eig = EIG(eig)" ]
[ "0.72855383", "0.6691216", "0.66405857", "0.661532", "0.6592952", "0.6520123", "0.6501074", "0.6423186", "0.64160115", "0.6396994", "0.63655776", "0.6349273", "0.63411045", "0.633906", "0.63198537", "0.62777245", "0.62247896", "0.6177474", "0.61190665", "0.6048089", "0.6035364", "0.6035216", "0.6025096", "0.5983382", "0.59536195", "0.59436613", "0.5936795", "0.5936643", "0.5924191", "0.5913852", "0.5819113", "0.58102685", "0.58079034", "0.5803273", "0.57847077", "0.5771842", "0.5760612", "0.5749367", "0.5725966", "0.57205045", "0.5717227", "0.56911206", "0.5688461", "0.5688453", "0.5671726", "0.5656796", "0.56439185", "0.5641463", "0.5636974", "0.5636009", "0.56317985", "0.56004167", "0.55731887", "0.5546995", "0.55451417", "0.5543868", "0.5531172", "0.54995495", "0.54908574", "0.5489857", "0.54854876", "0.5476947", "0.54764116", "0.54743", "0.5473137", "0.5461834", "0.54556674", "0.5450146", "0.5447554", "0.5442264", "0.54384804", "0.54375523", "0.54230493", "0.5411108", "0.5411037", "0.54091144", "0.5384037", "0.5364029", "0.5357763", "0.53563213", "0.5354231", "0.53502697", "0.5346515", "0.5333148", "0.532113", "0.53189623", "0.53155226", "0.5298988", "0.5295415", "0.5286532", "0.52846235", "0.5281251", "0.527359", "0.5260705", "0.5251012", "0.5247143", "0.5235704", "0.5231239", "0.5224415", "0.52154183" ]
document_score: 0.6650587
document_rank: 2
Returns weight and bias tensors for each layer of the RNN. These tensors are views on the underlying weight buffer allocated by CuDNN.
def get_parameters(fn, handle, weight_buf):
    cudnn_methods = [
        cudnn.lib.cudnnGetRNNLinLayerMatrixParams,
        cudnn.lib.cudnnGetRNNLinLayerBiasParams
    ]
    params = []
    num_linear_layers = _num_linear_layers(fn)
    num_layers = fn.num_directions * fn.num_layers
    for layer in range(num_layers):
        layer_params = []
        for cudnn_method in cudnn_methods:
            for linear_id in range(num_linear_layers):
                lin_layer_mat_desc = cudnn.FilterDescriptor()
                matrix_pointer = ctypes.c_void_p()
                check_error(cudnn_method(
                    handle,
                    fn.rnn_desc,
                    layer,
                    fn.x_descs[0],
                    fn.w_desc,
                    ctypes.c_void_p(weight_buf.data_ptr()),
                    linear_id,
                    lin_layer_mat_desc,
                    ctypes.byref(matrix_pointer)))

                data_type = ctypes.c_int()
                format = ctypes.c_int()
                nb_dims = ctypes.c_int()
                min_dim = 3
                filter_dim_a = torch.IntTensor(min_dim)
                check_error(cudnn.lib.cudnnGetFilterNdDescriptor(
                    lin_layer_mat_desc,
                    min_dim,
                    ctypes.byref(data_type),
                    ctypes.byref(format),
                    ctypes.byref(nb_dims),
                    ctypes.c_void_p(filter_dim_a.data_ptr())))

                filter_dim_a.resize_(nb_dims.value)
                elem_size = cudnn._sizeofmap[fn.datatype]
                offset_bytes = (matrix_pointer.value - weight_buf.data_ptr())
                assert(offset_bytes % elem_size == 0)
                offset = offset_bytes // elem_size

                # for all the RNN types provided by CUDNN, all the ih weights
                # are the same size and are allocated in a contiguous chunk
                # (same for the hh weights, and the ih and hh biases).
                # Since we're storing all the weights in a single tensor anyway,
                # might as well merge the CUDNN ones into a single tensor as well
                if linear_id == 0 or linear_id == num_linear_layers / 2:
                    assert(filter_dim_a.prod() == filter_dim_a[0])
                    param = fn.weight_buf.new().set_(
                        weight_buf.storage(), offset,
                        filter_dim_a[0] * num_linear_layers // 2,
                        filter_dim_a[2])
                    layer_params.append(param)
                else:
                    assert(cur_offset == offset)

                cur_offset = offset + filter_dim_a[0]

        params.append(layer_params)

    return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_weights_and_bias(self, rnn_weights, rnn_props):\n # from code of tensorflow GRU cell, it can be known that shape of hidden_kernel(or candidate_kernel)\n # is (input_size+hidden_unit, hidden_unit)\n hidden_size = rnn_weights[\"hidden_kernel\"].value.shape[1]\n input_size = rnn_weights[\"hidden_kernel\"].value.shape[0] - hidden_size\n weight_dtype = rnn_weights[\"hidden_kernel\"].dtype\n bias_dtype = rnn_weights[\"hidden_bias\"].dtype\n # below code will use same notation as ONNX document\n # z means update gate, r means reset gate, h means hidden gate;\n # at this time weights of gate include input and state, will split it next\n r_kernel, z_kernel = np.split(rnn_weights[\"gate_kernel\"].value, [hidden_size], axis=1)\n h_kernel = rnn_weights[\"hidden_kernel\"].value\n r_bias, z_bias = np.split(rnn_weights[\"gate_bias\"].value, [hidden_size], axis=0)\n h_bias = rnn_weights[\"hidden_bias\"].value\n # ONNX GRU split weights of input and state, so have to split *_kernel\n input_r_kernel, state_r_kernel = np.split(r_kernel, [input_size], axis=0)\n input_z_kernel, state_z_kernel = np.split(z_kernel, [input_size], axis=0)\n input_h_kernel, state_h_kernel = np.split(h_kernel, [input_size], axis=0)\n W_zrh = np.concatenate((input_z_kernel, input_r_kernel, input_h_kernel), axis=1)\n R_zrh = np.concatenate((state_z_kernel, state_r_kernel, state_h_kernel), axis=1)\n # transpose weight matrix\n W_zrh = np.transpose(np.expand_dims(W_zrh, axis=0), axes=(0, 2, 1))\n R_zrh = np.transpose(np.expand_dims(R_zrh, axis=0), axes=(0, 2, 1))\n W_zrh = W_zrh.astype(weight_dtype)\n R_zrh = R_zrh.astype(weight_dtype)\n assert W_zrh.shape == (1, 3*hidden_size, input_size)\n assert R_zrh.shape == (1, 3*hidden_size, hidden_size)\n Wb_zrh = np.concatenate((z_bias, r_bias, h_bias), axis=0)\n # tf don't have bias for state, so use 0 instead\n zero = np.zeros_like(z_bias)\n Rb_zrh = np.concatenate((zero, zero, zero), axis=0)\n B_zrh = np.concatenate((Wb_zrh, Rb_zrh), axis=0)\n B_zrh = np.expand_dims(B_zrh, axis=0)\n B_zrh = B_zrh.astype(bias_dtype)\n assert B_zrh.shape == (1, 6*hidden_size)\n # create const ONNX node\n w_name = utils.make_name(\"W\")\n w_node = self.g.make_const(w_name, W_zrh, skip_conversion=True)\n\n r_name = utils.make_name(\"R\")\n r_node = self.g.make_const(r_name, R_zrh, skip_conversion=True)\n\n b_name = utils.make_name(\"B\")\n b_node = self.g.make_const(b_name, B_zrh, skip_conversion=True)\n\n rnn_props.input_size = input_size\n rnn_props.hidden_size = hidden_size\n rnn_props.onnx_input_ids[\"W\"] = w_node.output[0]\n rnn_props.onnx_input_ids[\"R\"] = r_node.output[0]\n rnn_props.onnx_input_ids[\"B\"] = b_node.output[0]", "def get_weights(self):\n weights = []\n for layer in self.layers:\n weights += layer.weights\n return K.batch_get_value(weights)", "def get_weights(self):\n # First part is iterating over hidden weights. Then append the output weight.\n return [self.hidden_layers[i].get_weight().cpu().detach().numpy() for i in range(self.depth)] + \\\n [self.output_weight.cpu().detach().numpy()]", "def get_weights(self):\n # First part is iterating over hidden weights. 
Then append the output weight.\n return [self.hidden_layers[i].get_weight() for i in range(self.depth)] + \\\n [self.output_weight.cpu().detach().numpy()]", "def get_weights_tensor(self):\n return [self.hidden_layers[i].get_weight_tensor() for i in range(self.depth)] + \\\n [self.output_weight.clone()]", "def _create_weights(self):\n\n self.mu_W = tf.get_variable(\n name=\"mu_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.z_dim])\n\n self.mu_b = tf.get_variable(\n name=\"mu_b\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim])\n\n self.log_sig_sq_W = tf.get_variable(\n name=\"log_sig_sq_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.z_dim])\n\n self.log_sig_sq_b = tf.get_variable(\n name=\"log_sig_sq_b\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim])\n \n self.y_W = tf.get_variable(\n name=\"y_W\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim, self.rnn_dim])\n\n self.y_b = tf.get_variable(\n name=\"y_b\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim])\n \n self.softmax_W = tf.get_variable(\n name=\"softmax_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.vocabulary_size])\n \n self.softmax_b = tf.get_variable(\n name=\"softmax_b\", initializer=tf.random_normal_initializer(),\n shape=[self.vocabulary_size])", "def _extract_weights(self,W):\n wl1_size = self._D*self._hidden_layer_size\n bl1_size = self._hidden_layer_size\n \n wl2_size = self._hidden_layer_size*self._output_size\n bl2_size = self._output_size\n\n \n weights_L1 = W[0:wl1_size].reshape((self._D,self._hidden_layer_size))\n bias_L1 = W[wl1_size:wl1_size+bl1_size]\n \n start_l2 = wl1_size+bl1_size\n\n weights_L2 = W[start_l2: start_l2 + wl2_size].reshape((self._hidden_layer_size,self._output_size))\n bias_L2 = W[start_l2 + wl2_size : start_l2 + wl2_size + bl2_size]\n \n \n \n return weights_L1,bias_L1,weights_L2,bias_L2", "def get_weights(self):\n return [self.w, self.b]", "def get_weights(self):\n\n return self._cnn_model.get_weights()", "def get_weights(self):\n return self.nn.get_param_values()", "def GetWeights(self) -> numpy.ndarray:\n return numpy.concatenate(list(\n weight_layer.GetWeights() for weight_layer in self._weight_layers))", "def _generate_weights(self):\n weights = []\n for i in range(1, len(self.layers) - 1):\n weights.append(2 * np.random.random(\n (self.layers[i - 1] + 1, self.layers[i] + 1)) - 1)\n weights.append(2 * np.random.random(\n (self.layers[i] + 1, self.layers[i + 1])) - 1)\n return weights", "def get_weights(self):\n params = self.weights\n return K.batch_get_value(params)", "def return_weights(self):\n w0 = self.comparator.weight.data.numpy()\n b0 = self.comparator.bias.data.numpy()\n\n w1 = self.matcher.weight.data.numpy()\n b1 = self.matcher.bias.data.numpy()\n\n w2 = self.head.weight.data.numpy()\n b2 = self.head.bias.data.numpy()\n\n return w0, b0, w1, b1, w2, b2", "def get_weights(self) -> List[np.matrix]:\n to_ret = []\n for layer in self.__layers:\n to_ret.append(layer.get_coefficients())\n return to_ret", "def get_weights(self):\n return [self.W]", "def get_weights(self):\n return [self.W]", "def weights_lst(self):\n assert self.sess is not None, \"Model has not been fitted yet!\"\n return self.sess.run(self.W_lst)", "def get_weights(self):", "def GetWeights(self) -> numpy.ndarray:\n return numpy.concatenate(list(\n variable_ndarray.flatten() for variable_ndarray in\n self._layer.get_weights()))", "def _create_weights(self):\n 
gate_size = self._hidden_size * self._num_gates\n # Compute the shape of weight and bias.\n matrix_shapes, bias_shapes = [], []\n for layer in range(self._num_layers):\n for direction in range(self._num_directions):\n layer_input_size = self._input_size if layer == 0 \\\n else self._hidden_size * self._num_directions\n w_ih_shape = [gate_size, layer_input_size]\n w_hh_shape = [gate_size, self._hidden_size]\n b_ih_shape, b_hh_shape = [gate_size], [gate_size]\n matrix_shapes.extend([w_ih_shape, w_hh_shape])\n bias_shapes.extend([b_ih_shape, b_hh_shape])\n # Create single float32 weights.\n weights_count = 0\n self._weights_shapes = matrix_shapes + bias_shapes\n for shape in self._weights_shapes:\n weights_count += math_util.prod(shape)\n self._weights = Tensor([weights_count])\n self._weights.requires_grad = True", "def get_weights(self):\n weights = {}\n for idx, layer in enumerate(self.model.layers):\n if len(layer.get_weights())>0:\n weights[idx] = layer.get_weights()[0]\n else:\n weights[idx] = [] \n return weights", "def get_weights_from_dna(self):\n\n W1_size = self.n_input*self.n_hidden_1\n W2_size = self.n_hidden_1*self.n_hidden_2\n W3_size = self.n_hidden_2*self.n_output\n\n start_W1, end_W1 = 0, W1_size\n start_B1, end_B1 = end_W1, end_W1 + self.n_hidden_1\n start_W2, end_W2 = end_B1, end_B1 + W2_size\n start_B2, end_B2 = end_W2, end_W2 + self.n_hidden_2\n start_W3, end_W3 = end_B2, end_B2 + W3_size\n start_B3, end_B3 = end_W3, end_W3 + self.n_output\n\n W1 = self.dna[:, start_W1:end_W1].reshape(self.n_hidden_1, self.n_input)\n B1 = self.dna[:, start_B1:end_B1].reshape(self.n_hidden_1, 1)\n W2 = self.dna[:, start_W2:end_W2].reshape(self.n_hidden_2, self.n_hidden_1)\n B2 = self.dna[:, start_B2:end_B2].reshape(self.n_hidden_2, 1)\n W3 = self.dna[:, start_W3:end_W3].reshape(self.n_output, self.n_hidden_2)\n B3 = self.dna[:, start_B3:end_B3].reshape(self.n_output, 1)\n\n return W1, W2, W3, B1, B2, B3", "def get_weights_and_biases_from_Onnx(self):\n\t\t\n\t\tpass", "def get_weights(self):\n return self.weights\n #print(W)", "def layer_weights(self):\n return [l.W for l in self.children if isinstance(l, G.layers.DenseLayer)]", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def instantiate_weights(self):\n with tf.name_scope(\"decoder_init_state\"):\n self.W_initial_state = tf.get_variable(\"W_initial_state\", shape=[self.hidden_size, self.hidden_size*2], initializer=self.initializer)\n self.b_initial_state = tf.get_variable(\"b_initial_state\", shape=[self.hidden_size*2])\n with tf.name_scope(\"embedding_projection\"): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],initializer=self.initializer) # [vocab_size,embed_size] tf.random_uniform([self.vocab_size, self.embed_size],-1.0,1.0)\n self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size*2],dtype=tf.float32) #,initializer=self.initializer\n self.W_projection = tf.get_variable(\"W_projection\", shape=[self.hidden_size*2, self.num_classes],\n initializer=self.initializer) # [embed_size,label_size]\n self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])\n\n # GRU parameters:update gate related\n with tf.name_scope(\"gru_weights_encoder\"):\n self.W_z = tf.get_variable(\"W_z\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_z = tf.get_variable(\"U_z\", 
shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_z = tf.get_variable(\"b_z\", shape=[self.hidden_size])\n # GRU parameters:reset gate related\n self.W_r = tf.get_variable(\"W_r\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_r = tf.get_variable(\"U_r\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_r = tf.get_variable(\"b_r\", shape=[self.hidden_size])\n\n self.W_h = tf.get_variable(\"W_h\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_h = tf.get_variable(\"U_h\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_h = tf.get_variable(\"b_h\", shape=[self.hidden_size])\n\n with tf.name_scope(\"gru_weights_decoder\"):\n self.W_z_decoder = tf.get_variable(\"W_z_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_z_decoder = tf.get_variable(\"U_z_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.C_z_decoder = tf.get_variable(\"C_z_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer) #TODO\n self.b_z_decoder = tf.get_variable(\"b_z_decoder\", shape=[self.hidden_size*2])\n # GRU parameters:reset gate related\n self.W_r_decoder = tf.get_variable(\"W_r_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_r_decoder = tf.get_variable(\"U_r_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.C_r_decoder = tf.get_variable(\"C_r_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer) #TODO\n self.b_r_decoder = tf.get_variable(\"b_r_decoder\", shape=[self.hidden_size*2])\n\n self.W_h_decoder = tf.get_variable(\"W_h_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_h_decoder = tf.get_variable(\"U_h_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer) #TODO\n self.C_h_decoder = tf.get_variable(\"C_h_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer)\n self.b_h_decoder = tf.get_variable(\"b_h_decoder\", shape=[self.hidden_size*2])\n\n with tf.name_scope(\"full_connected\"):\n self.W_fc=tf.get_variable(\"W_fc\",shape=[self.hidden_size*2,self.hidden_size])\n self.a_fc=tf.get_variable(\"a_fc\",shape=[self.hidden_size])", "def get_weights(self):\n return self.model.get_weights()", "def get_weights(self):\n return self.model.get_weights()", "def build_layer(self) :\n inputsWithBias = self.input_count + 1\n self.weights = np.random.rand(inputsWithBias, self.node_count)\n self.weights_and_activations = (self.weights, self.activations)", "def targets_weights_fn(self):\n return common_layers.weights_all", "def weights(self):\n return [x.numpy() for x in self.core.w]", "def get_forward_parameters(self):\n if self.bias is not None:\n return [self.weights, self.bias]\n else:\n return [self.weights]", "def instantiate_weights(self):\n self.product_embeddings = tf.get_variable(\n name='product_embeddings',\n shape=[50000, 300],\n dtype=tf.float32\n )\n self.aisle_embeddings = tf.get_variable(\n name='aisle_embeddings',\n shape=[250, 50],\n dtype=tf.float32\n )\n self.department_embeddings = tf.get_variable(\n name='department_embeddings',\n shape=[50, 10],\n dtype=tf.float32\n )\n self.W_relu = tf.get_variable(\"W_relu\",shape=[670, 30]) #这个参数后续需要自适应\n self.b_relu = 
tf.get_variable(\"bias_relu\",shape=[30]) \n self.W_projection = tf.get_variable(\"W_projection\",shape=[30, 1]) \n self.b_projection = tf.get_variable(\"bias_projection\",shape=[1])", "def get_weights(layers):\n get_layer_weights = lambda layer: layer.get_weights()[0]\n return list(map(get_layer_weights, layers))", "def get_weights(self):\r\n return self.weights", "def get_weights(self):\r\n return self.weights", "def init_gru(rnn):\n \n def _concat_init(tensor, init_funcs):\n (length, fan_out) = tensor.shape\n fan_in = length // len(init_funcs)\n \n for (i, init_func) in enumerate(init_funcs):\n init_func(tensor[i * fan_in : (i + 1) * fan_in, :])\n \n def _inner_uniform(tensor):\n fan_in = nn.init._calculate_correct_fan(tensor, 'fan_in')\n nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))\n \n for i in range(rnn.num_layers):\n _concat_init(\n getattr(rnn, 'weight_ih_l{}'.format(i)),\n [_inner_uniform, _inner_uniform, _inner_uniform]\n )\n torch.nn.init.constant_(getattr(rnn, 'bias_ih_l{}'.format(i)), 0)\n\n _concat_init(\n getattr(rnn, 'weight_hh_l{}'.format(i)),\n [_inner_uniform, _inner_uniform, nn.init.orthogonal_]\n )\n torch.nn.init.constant_(getattr(rnn, 'bias_hh_l{}'.format(i)), 0)", "def _weights(self, layer_idx, expected_layer_name):\n # wb = self.vgg_layers[0][layer_idx][0][0][2] # based on old vgg-model shape\n wb = self.vgg_layers[layer_idx][0][0][0]\n W = wb[0][0]\n b = wb[0][1]\n # layer_name = self.vgg_layers[0][layer_idx][0][0][0][0] # based on old vgg-model shape\n layer_name = self.vgg_layers[layer_idx][0][0][3][0]\n\n # assert layer_name == expected_layer_name\n\n return W, b", "def get_weights(net):\n return [p.data for p in net.parameters()]", "def inputs_weights_init(self):\n input_user, input_item, input_rating = self.inputs_init()\n user_embeddings, item_embeddings = self.embeddings_layers_init()\n\n return input_user, input_item, input_rating, user_embeddings, item_embeddings", "def get_weights(self):\n return self.__weights", "def u_weights(self):\n for i in range(self.n_inputs):\n self._q_neuron.cx(self._weights[i], self.inputs[i])", "def get_weights(self):\n return self.weights", "def get_weights(self):\n return self.weights", "def get_model_and_tile_weights(model):\n weight = model.weight.data.detach().cpu().numpy()\n bias = model.bias.data.detach().cpu().numpy()\n analog_weight, analog_bias = model.analog_tile.get_weights()\n analog_weight = analog_weight.detach().cpu().numpy().reshape(weight.shape)\n analog_bias = analog_bias.detach().cpu().numpy()\n return weight, bias, analog_weight, analog_bias", "def getWeights(self):\n return self.W1, self.W2", "def init_gru(rnn):\n\n def _concat_init(tensor, init_funcs):\n (length, fan_out) = tensor.shape\n fan_in = length // len(init_funcs)\n\n for (i, init_func) in enumerate(init_funcs):\n init_func(tensor[i * fan_in: (i + 1) * fan_in, :])\n\n def _inner_uniform(tensor):\n fan_in = nn.init._calculate_correct_fan(tensor, 'fan_in')\n nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))\n\n for i in range(rnn.num_layers):\n _concat_init(\n getattr(rnn, 'weight_ih_l{}'.format(i)),\n [_inner_uniform, _inner_uniform, _inner_uniform]\n )\n torch.nn.init.constant_(getattr(rnn, 'bias_ih_l{}'.format(i)), 0)\n\n _concat_init(\n getattr(rnn, 'weight_hh_l{}'.format(i)),\n [_inner_uniform, _inner_uniform, nn.init.orthogonal_]\n )\n torch.nn.init.constant_(getattr(rnn, 'bias_hh_l{}'.format(i)), 0)", "def initWeights(self):\n self.weights = []\n self.bias = []\n for i, dim in 
enumerate(self.dimensions[1:]):\n self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))\n self.bias.append(np.random.uniform(-1,1,dim))", "def get_parameters(self):\n if self.add_bias:\n params = np.concatenate((self.bias, self.W), 0)\n else:\n params = self.W\n return params", "def get_weights(self):\n return self._weights", "def get_weights(self):\n return self._weights", "def get_weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights_(self):\n return self.get_tensor_value('logistic_regression/weights:0')", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def get_variables(self) -> typing.List:\n parts = (self.neural_net.encoder, self.neural_net.predictor, self.neural_net.dynamics)\n return [v for v_list in map(lambda n: n.weights, parts) for v in v_list]", "def init_weights(self):\n\n params = torch.load(self.resnet_weight)\n\n self.fc1.weight.data = params['state_dict']['module.fc.weight'].clone()\n self.fc1.bias.data = params['state_dict']['module.fc.bias'].clone()\n\n\n r = np.sqrt(1.) / np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc4.in_features +\n self.fc4.out_features)\n self.fc4.weight.data.uniform_(-r, r)\n self.fc4.bias.data.fill_(0)", "def get_weights(model) -> Weights:\n return [val.cpu().numpy() for _, val in model.state_dict().items()]", "def get_weights(self):\n return []", "def initialize_weights_and_bias(self, X_train):\n n_samples, n_features = np.shape(X_train)\n n_output = 1 \n \n # This is the numeber of gridcells and we want to make one prediction pr cell. 
\n # It this doesn't work calculate the number of griddcells.\n\n self.b_h = [] #np.ones((self.n_hidden_layers, self.n_hidden[0]))\n self.W_h = []\n\n for i in range(len(self.n_hidden)):\n if (i == 0):\n self.W_h.append(self.random.normal(loc=0.0, scale=0.1, size=(n_features, self.n_hidden[0])))\n self.b_h.append(np.ones(self.n_hidden[0]))\n else:\n self.W_h.append(self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden[i-1], self.n_hidden[i])))\n self.b_h.append(np.ones(self.n_hidden[i])) \n \n self.b_out = [1]\n self.W_out = self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden[-1], n_output))", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def init(InputUnits, OutputUnits, numHiddenLayer, HiddenUnits=None):\n global HiddenUnit\n all_weights = []\n if HiddenUnits is None:\n HiddenUnits = []\n elif isinstance(HiddenUnits, int):\n HiddenUnits = [HiddenUnits]\n\n # for InputLayer\n\n parameters = generate(HiddenUnits[0], InputUnits)\n allWeights = mat.r_[parameters.flatten()]\n\n if numHiddenLayer > 1:\n for i in range(numHiddenLayer):\n if i < numHiddenLayer-1:\n parameters = generate(HiddenUnits[i+1], HiddenUnits[i])\n allWeights = mat.r_[allWeights, parameters.flatten()]\n else:\n parameters = generate(OutputUnits, HiddenUnits[i])\n allWeights = mat.r_[allWeights, parameters.flatten()]\n\n else:\n # for output layer\n parameters = generate( OutputUnits, HiddenUnits[0])\n allWeights = mat.r_[allWeights, parameters.flatten()]\n\n return allWeights", "def _build_bdrnn_graph(self, hparams):\n\n sample = self.iterator.get_next()\n\n inputs, tgt_outputs, seq_len = sample\n\n # linear projection to state size\n #with tf.variable_scope(\"bdrnn_in\", dtype=tf.float32):\n # inputs = tf.layers.dense(inputs=inputs,\n # units=hparams.input_proj_size,\n # kernel_initializer=tf.glorot_uniform_initializer())\n\n lm_fw_cell = []\n lm_bw_cell = []\n lm_init_state_fw = []\n lm_init_state_bw = []\n if hparams.pretrained:\n with tf.variable_scope(\"lm_rnn\", dtype=tf.float32):\n # create lm\n with tf.variable_scope(\"fw\", dtype=tf.float32):\n lm_fw_cell = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=1,\n mode=self.mode)\n # build the cell so it is in the correct scope\n # NOTE: this is hard coded\n lm_fw_cell[0].build([None, hparams.num_features])#hparams.input_proj_size])\n lm_init_state_fw = _get_initial_state([lm_fw_cell[0].state_size], tf.shape(inputs)[0], \"lm\")\n with tf.variable_scope(\"bw\", dtype=tf.float32):\n lm_bw_cell = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=1,\n mode=self.mode)\n # NOTE: this is hard coded\n lm_bw_cell[0].build([None, hparams.num_features])#hparams.input_proj_size])\n lm_init_state_bw = _get_initial_state([lm_bw_cell[0].state_size], tf.shape(inputs)[0], \"lm\")\n\n lm_outputs, lm_states = tf.nn.bidirectional_dynamic_rnn(lm_fw_cell[0],\n lm_bw_cell[0],\n inputs,\n sequence_length=seq_len,\n initial_state_fw=lm_init_state_fw[0],\n initial_state_bw=lm_init_state_bw[0],\n dtype=tf.float32)\n # optionally fix the LM weights\n if hparams.fixed_lm:\n print(\"Fixing pretrained language models.\")\n lm_outputs = tf.stop_gradient(lm_outputs)\n lm_outputs = tf.concat([lm_outputs[0], lm_outputs[1]], axis=-1)\n lm_outputs = tf.layers.dense(lm_outputs,\n 20,\n kernel_initializer=tf.glorot_uniform_initializer())\n lm_outputs = tf.concat([lm_outputs, inputs], axis=-1)\n\n\n #lm_outputs = 
tf.concat([lm_outputs[0], lm_outputs[1], inputs], axis=-1)\n else:\n lm_outputs = tf.concat(lm_outputs, axis=-1)\n\n\n\n with tf.variable_scope(\"bdrnn\", dtype=tf.float32) as bdrnn_scope:\n # create bdrnn\n with tf.variable_scope(\"fw\", dtype=tf.float32):\n fw_cells = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n mode=self.mode\n )\n init_state_fw = _get_initial_state([cell.state_size for cell in fw_cells],\n tf.shape(inputs)[0], \"initial_state_fw\")\n\n with tf.variable_scope(\"bw\", dtype=tf.float32):\n bw_cells = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n mode=self.mode,\n )\n\n init_state_bw = _get_initial_state([cell.state_size for cell in bw_cells],\n tf.shape(inputs)[0], \"initial_state_bw\")\n # NOTE: this is commented because the lm cells and states are separated now\n #fw_cells = lm_fw_cell + fw_cells\n #bw_cells = lm_bw_cell + bw_cells\n #init_state_fw = lm_init_state_fw + init_state_fw\n #init_state_bw = lm_init_state_bw + init_state_bw\n\n # run bdrnn\n combined_outputs, output_state_fw, output_state_bw = \\\n tf.contrib.rnn.stack_bidirectional_dynamic_rnn(cells_fw=fw_cells,\n cells_bw=bw_cells,\n inputs=lm_outputs,\n sequence_length=seq_len,\n initial_states_fw=init_state_fw,\n initial_states_bw=init_state_bw,\n dtype=tf.float32,\n scope=bdrnn_scope)\n # outputs is a tuple (output_fw, output_bw)\n # output_fw/output_bw are tensors [batch_size, max_time, cell.output_size]\n # outputs_states is a tuple (output_state_fw, output_state_bw) containing final states for\n # forward and backward rnn\n\n # concatenate the outputs of each direction\n #combined_outputs = tf.concat([outputs[0], outputs[1]], axis=-1)\n\n with tf.variable_scope(\"bdrnn_out\", dtype=tf.float32):\n # dense output layers\n dense1 = tf.layers.dense(inputs=combined_outputs,\n units=hparams.num_dense_units,\n kernel_initializer=tf.glorot_uniform_initializer(),\n activation=tf.nn.relu,\n use_bias=True)\n drop1 = tf.layers.dropout(inputs=dense1,\n rate=hparams.dropout,\n training=self.mode==tf.contrib.learn.ModeKeys.TRAIN)\n dense2 = tf.layers.dense(inputs=drop1,\n units=hparams.num_dense_units,\n kernel_initializer=tf.glorot_uniform_initializer(),\n activation=tf.nn.relu,\n use_bias=True)\n drop2 = tf.layers.dropout(inputs=dense2,\n rate=hparams.dropout,\n training=self.mode==tf.contrib.learn.ModeKeys.TRAIN)\n\n logits = tf.layers.dense(inputs=drop2,\n units=hparams.num_labels,\n use_bias=False)\n\n # mask out entries longer than target sequence length\n mask = tf.sequence_mask(seq_len, dtype=tf.float32)\n\n crossent = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,\n labels=tgt_outputs,\n name=\"crossent\")\n\n # divide loss by batch_size * mean(seq_len)\n loss = tf.reduce_sum(crossent*mask)/tf.cast(hparams.batch_size, tf.float32)\n\n metrics = []\n update_ops = []\n if self.mode == tf.contrib.learn.ModeKeys.EVAL:\n # mean eval loss\n loss, loss_update = tf.metrics.mean(values=loss)\n\n predictions = tf.argmax(input=logits, axis=-1)\n tgt_labels = tf.argmax(input=tgt_outputs, axis=-1)\n acc, acc_update = tf.metrics.accuracy(predictions=predictions,\n labels=tgt_labels,\n weights=mask)\n # confusion matrix\n targets_flat = tf.reshape(tgt_labels, [-1])\n predictions_flat = tf.reshape(predictions, [-1])\n mask_flat = tf.reshape(mask, [-1])\n cm, cm_update = streaming_confusion_matrix(labels=targets_flat,\n predictions=predictions_flat,\n num_classes=hparams.num_labels,\n weights=mask_flat)\n tf.add_to_collection(\"eval\", 
cm_summary(cm, hparams.num_labels))\n metrics = [acc, cm]\n update_ops = [loss_update, acc_update, cm_update]\n\n return logits, loss, metrics, update_ops", "def get_weights_and_biases_from_pyTorch(self):\n\t\t\n\t\tilayer_id = 28\n\n\t\tmodel = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\twatcher = ww.WeightWatcher(model = model, log_level=logging.WARNING)\n\t\t\n\t\tdetails = watcher.describe(layers=self.fclayers)\n\t\tprint(details)\n\t\t\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\t\n\t\tparams = ww.DEFAULT_PARAMS.copy()\n\t\tparams[ww.ADD_BIASES] = True\n\t\t\n\t\tweights = watcher.get_Weights(layer=ilayer_id, params=params)\n\t\tself.assertEqual(len(weights),1)\n\t\t\n\t\tW = weights[0]\n\t\tself.assertEqual(np.max(W.shape),N)\n\t\tself.assertEqual(np.min(W.shape),M)\n\n\t\tpass", "def getWeights(self,squeeze=False):\n weights = self.weights\n if squeeze:\n weights = weights.squeeze()\n return np.copy(weights)", "def generator(real_init): #-1, 5, 3\n with tf.variable_scope('rnn_gen', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n x_init = real_init[:,:,0] #-1, 8\n y_init = real_init[:,:,1] #-1, 8\n z_init = real_init[:,:,2] #-1, 8\n \n with tf.variable_scope('x_rw', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n x_w1 = tf.get_variable('x_w1', [6, 3])\n x_b1 = tf.get_variable('x_b1', [3])\n x_w2 = tf.get_variable('x_w2', [9, 3])\n x_b2 = tf.get_variable('x_b2', [3])\n x_w3 = tf.get_variable('x_w3', [3, 1])\n x_b3 = tf.get_variable('x_b3', [1])\n x_w4 = tf.get_variable('x_w4', [3, 1])\n x_b4 = tf.get_variable('x_b4', [1])\n \n x_full = x_init#-1, 8\n for i in range(104):\n x_layer1 = tf.nn.elu(tf.matmul(x_init, x_w1) + x_b1)#-1, 4\n x_layer2 = tf.concat([x_init, x_layer1], 1)#-1, 12\n x_layer3 = tf.nn.elu(tf.matmul(x_layer2, x_w2) + x_b2)#-1, 4\n \n x_mean = tf.sigmoid(tf.matmul(x_layer3, x_w3) + x_b3)#-1, 1\n x_gamma = tf.sigmoid(tf.matmul(x_layer3, x_w4) + x_b4)#-1, 1\n x_delta = x_mean + x_gamma * tf.random_normal(tf.shape(x_gamma), dtype = tf.float32)#-1, 1\n x_delta_denorm = (x_delta * 2 - 1) / 80\n x_output = tf.expand_dims(x_full[:,-1], 1) + x_delta_denorm\n x_full = tf.concat([x_full, x_output], axis = 1)#-1, 10 + 1\n x_init = x_full[:,-6:]\n \n with tf.variable_scope('y_rw', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n y_w1 = tf.get_variable('y_w1', [6, 3])\n y_b1 = tf.get_variable('y_b1', [3])\n y_w2 = tf.get_variable('y_w2', [9, 3])\n y_b2 = tf.get_variable('y_b2', [3])\n y_w3 = tf.get_variable('y_w3', [3, 1])\n y_b3 = tf.get_variable('y_b3', [1])\n y_w4 = tf.get_variable('y_w4', [3, 1])\n y_b4 = tf.get_variable('y_b4', [1])\n \n y_full = y_init#-1, 8\n for i in range(104):\n y_layer1 = tf.nn.elu(tf.matmul(y_init, y_w1) + y_b1)\n y_layer2 = tf.concat([y_init, y_layer1], 1)#-1, 12\n y_layer3 = tf.nn.elu(tf.matmul(y_layer2, y_w2) + y_b2)#-1, 4\n \n y_mean = tf.sigmoid(tf.matmul(y_layer3, y_w3) + y_b3) #-1, 1\n y_gamma = tf.sigmoid(tf.matmul(y_layer3, y_w4) + y_b4) #-1, 1\n y_delta = y_mean + y_gamma * tf.random_normal(tf.shape(y_gamma), dtype = tf.float32)#-1, 1\n y_delta_denorm = (y_delta * 5 - 2.7) / 1700\n y_output = tf.expand_dims(y_full[:,-1], 1) + y_delta_denorm\n y_full = tf.concat([y_full, y_output], axis = 1)#-1, 10 + 1, 1\n y_init = y_full[:,-6:]\n \n with tf.variable_scope('z_rw', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n z_w1 = tf.get_variable('z_w1', [6, 3])\n 
z_b1 = tf.get_variable('z_b1', [3])\n z_w2 = tf.get_variable('z_w2', [9, 3])\n z_b2 = tf.get_variable('z_b2', [3])\n z_w3 = tf.get_variable('z_w3', [3, 1])\n z_b3 = tf.get_variable('z_b3', [1])\n z_w4 = tf.get_variable('z_w4', [3, 1])\n z_b4 = tf.get_variable('z_b4', [1])\n \n z_full = z_init#-1, 8\n for i in range(104):\n z_layer1 = tf.nn.elu(tf.matmul(z_init, z_w1) + z_b1)\n z_layer2 = tf.concat([z_init, z_layer1], 1)#-1, 12\n z_layer3 = tf.nn.elu(tf.matmul(z_layer2, z_w2) + z_b2)#-1, 4\n \n z_mean = tf.sigmoid(tf.matmul(z_layer3, z_w3) + z_b3) #-1, 1\n z_gamma = tf.sigmoid(tf.matmul(z_layer3, z_w4) + z_b4) #-1, 1\n z_delta = z_mean + z_gamma * tf.random_normal(tf.shape(z_gamma), dtype = tf.float32)#-1, 1\n z_delta_denorm = (z_delta * 3 - 2.2) / 300\n z_output = tf.expand_dims(z_full[:,-1], 1) + z_delta_denorm\n z_full = tf.concat([z_full, z_output], axis = 1)#-1, 10 + 1, 1\n z_init = z_full[:,-6:]\n \n rand_layer1 = tf.concat([tf.expand_dims(x_full, 2), tf.expand_dims(y_full, 2), tf.expand_dims(z_full, 2)], axis = 2)#-1, 200, 3\n rand_layer1_reshape = tf.reshape(rand_layer1, shape = (-1, 110, 3, 1))\n output_filt = tf.ones(shape = (11, 1, 1, 1)) / 11\n \n rand_layer2 = tf.nn.conv2d(rand_layer1_reshape, output_filt, strides = [1,1,1,1], padding = 'VALID')\n \n return tf.reshape(rand_layer2, shape = (-1, 100, 3)) #-1, 110, 3", "def sample(self) -> Tuple[torch.Tensor, ...]:\n assert len(self) >= self.batch_size\n\n indices = np.random.choice(\n len(self.buffer), size=self.batch_size, replace=False\n )\n\n states, actions, rewards, next_states, dones = [], [], [], [], []\n\n for i in np.nditer(indices):\n s, a, r, n_s, d = self.buffer[i]\n states.append(np.array(s, copy=False))\n actions.append(np.array(a, copy=False))\n rewards.append(np.array(r, copy=False))\n next_states.append(np.array(n_s, copy=False))\n dones.append(np.array(float(d), copy=False))\n\n states_ = torch.FloatTensor(np.array(states)).to(device)\n actions_ = torch.FloatTensor(np.array(actions)).to(device)\n rewards_ = torch.FloatTensor(np.array(rewards).reshape(-1, 1)).to(device)\n next_states_ = torch.FloatTensor(np.array(next_states)).to(device)\n dones_ = torch.FloatTensor(np.array(dones).reshape(-1, 1)).to(device)\n\n if torch.cuda.is_available():\n states_ = states_.cuda(non_blocking=True)\n actions_ = actions_.cuda(non_blocking=True)\n rewards_ = rewards_.cuda(non_blocking=True)\n next_states_ = next_states_.cuda(non_blocking=True)\n dones_ = dones_.cuda(non_blocking=True)\n\n return states_, actions_, rewards_, next_states_, dones_", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] if w is not None]", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def encode_weights(self):\n weights = []\n for param in self.global_policy.parameters():\n shape = list(param.shape)\n param_list = torch.flatten(param.data).tolist()\n weights.append(Tensor(shape, param_list))\n\n return weights", "def get_weights(self):\n\n # Check if fit had been called\n check_is_fitted(self, 'weights_')\n\n w = self.weights_.reshape(self.X_.shape[1], self.n_classes_, order='F')\n\n return w", "def n_step_birnn(\n n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):\n return n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,\n 
activation, use_bi_direction=True)", "def get_weights(self):\n return self._weight", "def _setup(self) -> None:\n #TODO: type\n self.activation = self.params['activation']\n\n self.batchsize: int = self.params['batchsize']\n\n self.input_shape: Tuple[int,int,int] = self.params['input_shape']\n\n self.d: int = self.input_shape[1]\n assert(not self.d == 0)\n\n self.n: int = int(sqrt(self.input_shape[2]))\n assert(not self.n == 0)\n\n self.dtype: type = self.params['dtype']\n\n # initialize weights\n self.W: List[tf.Tensor] = []\n \n for i in range(3):\n #TODO: type\n w_init = self.params['initializer_w']\n if self.params['initializer_w' + str(i)] is not None:\n w_init = self.params['initializer_w' + str(i)]\n\n w_stddev: float = self.params['stddev_w']\n if self.params['stddev_w' + str(i)] is not None:\n w_stddev = self.params['stddev_w' + str(i)]\n\n self.W.append(tf.get_variable(\"weights_\" + str(i),\n shape = (self.d, (self.d if i < 2 else 2 * self.d)),\n dtype = self.dtype,\n initializer = w_init(stddev=w_stddev)))\n\n #TODO: type\n b_init = self.params['initializer_b']\n b_stddev = self.params['stddev_b']\n self.B: tf.Tensor = tf.get_variable(\"biases\", shape = (1, self.d, 1),\n dtype = self.dtype,\n initializer = b_init(stddev=b_stddev))\n\n # create/load expand matrix\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n self.expand: tf.Tensor = tf.get_variable(\n \"expand\" + str(self.n),\n shape = (self.n, self.n * self.n),\n dtype = self.dtype,\n initializer = tf.constant_initializer(make_expand(self.n)))\n\n # create/load tile matrix\n tile: np.ndarray = np.array([([1] + [0]*(self.n-1))*self.n])\n for i in range(1, self.n):\n tile = np.append(tile, [([0]*i + [1] + [0]*(self.n-1-i))*self.n], 0)\n\n self.tile: tf.Tensor = tf.constant(tile, self.dtype)", "def initialize_weights(self):\n w1 = np.random.uniform(-1.0, 1.0, size = self.n_hidden * (self.n_features + 1)).reshape(self.n_hidden, (self.n_features + 1))/(self.n_features + 1)\n w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden+1)).reshape(self.n_output, self.n_hidden+ 1)/(self.n_hidden + 1)\n return w1, w2", "def get_weights(t_vars, sess):\n if TfUtils._is_op_defined(t_vars):\n num_of_layers = len(TfUtils.get_layers(t_vars))\n return [sess.run(t_var) for t_var in t_vars[ : num_of_layers]]\n return [sess.run(t_var) for t_var in t_vars if \"kernel\" in t_var.name]", "def get_weights(self):\n \n w = torch.exp(self._weight) * self.mask_d + self._weight * self.mask_o\n\n w_squared_norm = (w ** 2).sum(-1, keepdim=True)\n \n w = self._diag_weight.exp() * w / w_squared_norm.sqrt()\n \n wpl = self._diag_weight + self._weight - 0.5 * torch.log(w_squared_norm) \n\n return w.t(), wpl.t()[self.mask_d.bool().t()].view(\n self.dim, self.in_features // self.dim, self.out_features // self.dim)", "def generate_weights(n_features, hidden_layer_sizes, n_out):\n\n weights = []\n biases = []\n\n # Weights from input layer to first hidden layer\n weights.append(init_weight(hidden_layer_sizes[0], n_features))\n biases.append(init_bias(hidden_layer_sizes[0]))\n\n # Weights from one hidden layer to the next\n for i in range(1, hidden_layer_sizes.size):\n weights.append(\n init_weight(hidden_layer_sizes[i],hidden_layer_sizes[i - 1]))\n biases.append(init_bias(hidden_layer_sizes[i]))\n\n # Weights from last hidden layer to output layer\n weights.append(init_weight(n_out, hidden_layer_sizes[-1]))\n biases.append(init_bias(n_out))\n\n return weights, biases", "def _compute_weights(self, beta=1):\n N = self.__len__()\n 
# Allocate memory\n W = np.zeros((N,N), dtype='float')\n \n for i in range(N):\n for j in range(N):\n if i > j:\n W[i,j] = self._kernel(self.flat_image[i],\n self.flat_image[j], \n beta)\n W += W.T \n for i in range(N):\n W[i,i] = 1\n \n return W", "def n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,\n activation, use_bi_direction, **kwargs): # NOQA\n\n argument.check_unexpected_kwargs(\n kwargs, train='train argument is not supported anymore. '\n 'Use chainer.using_config',\n use_cudnn='use_cudnn argument is not supported anymore. '\n 'Use chainer.using_config')\n argument.assert_kwargs_empty(kwargs)\n\n activation_list = ['tanh', 'relu']\n if activation not in activation_list:\n candidate = ','.join(activation_list)\n raise ValueError('Invalid activation: \"%s\". Please select from [%s]'\n % (activation, candidate))\n\n xp = cuda.get_array_module(hx)\n\n if xp is not numpy and chainer.should_use_cudnn('>=auto', 5000):\n states = get_random_state().create_dropout_states(dropout_ratio)\n lengths = [len(x) for x in xs]\n xs = chainer.functions.concat(xs, axis=0)\n\n rnn_mode = 'rnn_%s' % activation\n w = cudnn_rnn_weight_concat(\n n_layers, states, use_bi_direction, rnn_mode, ws, bs)\n\n if use_bi_direction:\n # Bi-directional RNN\n if activation == 'tanh':\n rnn = NStepBiRNNTanh\n elif activation == 'relu':\n rnn = NStepBiRNNReLU\n else:\n # Uni-directional RNN\n if activation == 'tanh':\n rnn = NStepRNNTanh\n elif activation == 'relu':\n rnn = NStepRNNReLU\n\n hy, ys = rnn(n_layers, states, lengths)(hx, w, xs)\n sections = numpy.cumsum(lengths[:-1])\n ys = chainer.functions.split_axis(ys, sections, 0)\n return hy, ys\n\n else:\n\n def f(x, h, c, w, b):\n xw, hw = w\n xb, hb = b\n rnn_in = linear.linear(x, xw, xb) + linear.linear(h, hw, hb)\n if activation == 'tanh':\n return tanh.tanh(rnn_in), None\n elif activation == 'relu':\n return relu.relu(rnn_in), None\n\n hy, _, ys = n_step_rnn_impl(\n f, n_layers, dropout_ratio, hx, None, ws, bs, xs, use_bi_direction)\n return hy, ys", "def pull_weights(self, learning_rate):\n for w in self.weights:\n w.value += learning_rate * w.gradient\n # Reset all the weights' gradient to 0\n # We will not reset all other units' gradient, because all other units should be initialized in next training\n # round, and the init value of gradient is 0\n for w in self.weights:\n w.gradient = 0", "def get_flat_weights(model):\n return torch.cat([p.data.view(-1, 1) for p in model.parameters()], 0)", "def feed_forward(inputs, output_dim, name='', initializer=None):\n _dim = inputs.get_shape().as_list()[1]\n weights = tf.get_variable('weights_{}'.format(name),\n [_dim, output_dim],\n initializer=initializer)\n zero_init = tf.zeros_initializer()\n bias = tf.get_variable('bias_{}'.format(name), shape=output_dim,\n dtype=tf.float32,\n initializer=zero_init)\n output = tf.nn.xw_plus_b(inputs, weights, bias)\n return output", "def influence_weights(sess, n_neighbours, data_size):\n\n # To see the names of the different layers of the neural network\n tf.global_variables()\n\n # Get the weights for a specified layer\n layer_number = 0\n weights = tf.get_default_graph().get_tensor_by_name('layers/fc-layer{:d}/kernel:0'.format(layer_number))\n weights = sess.run(tf.nn.top_k(weights))\n # nn.top_k has as output [[weights][indexes to next node]], so the weights are located at position 0\n weights_numpy = np.array(weights[0])\n\n # Get the working directory of this script\n working_dir = os.path.dirname(os.path.abspath(__file__))\n # Get the file name for saving the 
numpy array\n file_name = working_dir + \"/output_ANN/HDF5_files/normalized_ANN/{}_norm_ANNweights_{}\".format(\n n_neighbours, data_size)\n # Get an unique file name\n file_name = saving(file_name, save=False, file_type='h5')\n # Save the numpy array as HDF5 file\n h5f = h5py.File(file_name, 'w')\n h5f.create_dataset('dataset_{}'.format(data_size), data=weights_numpy)\n\n return weights_numpy", "def get_conv_weights(self, conv_layer_i=0):\n conv_layer = self.get_conv_layer(conv_layer_i)\n weights = conv_layer.weights[0].numpy()\n weights = np.transpose(weights, [2,1,0])\n return weights", "def build(self, input_layer, trainable=True):\n\n with tf.variable_scope(self.name):\n\n # Create a weight matrix\n input_size = input_layer.get_shape()[-1].value\n\n self.weights = weight_variable([input_size, self.output_size], 'weights', trainable, False)\n self.bias = bias_variable([self.output_size], 'bias', trainable)\n\n # Create the ReLU layer\n self.layer = tf.matmul(input_layer, self.weights) + self.bias\n\n if self.activation_function:\n self.layer = self.activation_function(self.layer)\n\n return self.layer, self.weights, self.bias", "def lecun_ntk_wb(n_hid_layers=5, n_units=50):\n N = [1] + [n_units] * n_hid_layers + [1]\n \n W = []\n B = []\n for i in range(1, len(N)):\n W.append(np.random.randn(N[i], N[i - 1]) / N[i - 1] ** 0.5)\n B.append(np.zeros(N[i]))\n \n return {'N': N, 'W': W, 'B': B}", "def forward(self, inputs, mask=None):\n N = inputs.size(0)\n out = inputs.view(N, 1, -1, self.n_mel_channels) # [N, 1, Ty, n_mels]\n for conv, bn in zip(self.convs, self.bns):\n out = conv(out)\n out = bn(out)\n out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K]\n\n out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]\n T = out.size(1)\n N = out.size(0)\n out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]\n if mask is not None:\n out = out.masked_fill(mask.unsqueeze(-1), 0)\n\n self.gru.flatten_parameters()\n memory, out = self.gru(out) # memory --- [N, Ty, E//2], out --- [1, N, E//2]\n\n return memory, out.squeeze(0)" ]
[ "0.7037132", "0.6992914", "0.695443", "0.69183373", "0.68530166", "0.6803836", "0.6534791", "0.64984447", "0.64839584", "0.64703405", "0.6404171", "0.6397448", "0.63523", "0.6348087", "0.6256905", "0.62384343", "0.62384343", "0.62112504", "0.62057334", "0.6160526", "0.6132221", "0.6121456", "0.6092795", "0.6090453", "0.6088111", "0.60871804", "0.60852444", "0.60852444", "0.60852444", "0.60852444", "0.6075956", "0.607401", "0.607401", "0.6040154", "0.60285926", "0.6020391", "0.5996703", "0.5991002", "0.59800553", "0.59720314", "0.59720314", "0.5969568", "0.5964763", "0.5956581", "0.59439266", "0.5936918", "0.5927366", "0.5925697", "0.5925697", "0.5910837", "0.58979493", "0.58946043", "0.5893263", "0.58868253", "0.5881538", "0.5881538", "0.5881538", "0.5842957", "0.5842957", "0.5842957", "0.5842957", "0.583861", "0.58280253", "0.58280253", "0.58280253", "0.58280253", "0.58280253", "0.58016163", "0.57979035", "0.5797783", "0.5779588", "0.57650375", "0.5763204", "0.5760754", "0.5759614", "0.5752538", "0.5741973", "0.5734295", "0.57292473", "0.57263285", "0.571934", "0.5715057", "0.5714445", "0.57118744", "0.5711244", "0.57108885", "0.5683686", "0.5680514", "0.5665428", "0.5656644", "0.5646503", "0.5645532", "0.5639338", "0.56388366", "0.56266993", "0.56259394", "0.5622293", "0.5601045", "0.55995935", "0.55984795" ]
document_score: 0.60313946
document_rank: 34
Redirect to home page on url /
def index():
    return redirect(url_for("home"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def homepage():\n return redirect('index.html')", "def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")", "def start_page():\n if not _home:\n abort(404)\n return redirect(_home)", "def start_page():\n if not _home:\n abort(404)\n return redirect(_home)", "def homepage():\n return redirect(\"/posts\")", "def home_page():\n return redirect('/users')", "def go_home(request):\n\n url = request.route_url('home', _app_url=get_app_url(request))\n return HTTPFound(location=url)", "def home(request):\n return redirect('commprod/')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def homepage( request ):\n if \"email\" in request.session:\n return redirect( '/home' )\n return render_to_response( 'index.html' )", "def toLanding():\n return redirect(url_for('landingurl'))", "def idx(_request):\n return HttpResponseRedirect('/home')", "def home(request):\n if request.user.is_authenticated:\n return redirect('/start')\n return render(request, 'home/home.html')", "def go_home(self):\r\n if self.home_url is not None:\r\n self.set_url(self.home_url)", "def homepage_redirect():\n return redirect('/upload_file')", "def home(request):\n default_url = reverse(root_nodes)\n\n if request.user.moderator_profile != None:\n home_url = request.user.moderator_profile.get_home_url()\n if home_url != None:\n return HttpResponseRedirect(home_url)\n\n return HttpResponseRedirect(default_url)", "def home():\n # if session.get('username'):\n # return redirect(url_for('categories'))\n # else:\n return render_template('home.html')", "def welcome_page():\n return redirect(\"/static/welcome.html\")", "def home_page():\n if not g.user:\n flash(\"Please login to view.\", \"warning\")\n return redirect('/login')\n return render_template('index.html')", "def home(request):\n # if request.user.is_authenticated():\n # return redirect('/fastapp')\n return context()", "def home(request):\n assert isinstance(request, HttpRequest)\n return redirect('/departments')", "def home(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('done')\n else:\n return render_to_response('home.html', RequestContext(request))", "def home():\n\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n else:\n return redirect(url_for('show_registrations'))", "def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)", "def home():\n if not session.get('logged_in'):\n return redirect(url_for('welcome'))\n return render_template('home.html', filename=\"yarg.jpg\")", "def index_file():\n return redirect(\"/\")", "def home_page():\n return redirect('/register')", "def home(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('logged-in')\n else:\n home_view = 1\n return render_to_response('content/home.html', {'version': version, \"home_view\":home_view},\n RequestContext(request))", "def home_view(request):\n return HttpResponseRedirect('/schedule/')", "def root1(request):\n\ttemplate = 'main'\n\treturn redirect(template)", "def redirect(url):", "def home(request):\n #print (\"home\")\n if request.user.is_authenticated():\n return redirect('done')\n return 
context()", "def index():\n if 'name' in session:\n return render_template('home.html')\n return redirect(url_for('log_in'))", "def home_redirect(request):\n if request.user.is_authenticated() and request.user.is_staff:\n return redirect(\"volunteers\")\n elif request.user.is_authenticated() and not request.user.is_superuser:\n related_volunteer = get_object_or_404(Volunteer, user_id=request.user.pk)\n return redirect(\"edit-volunteer-profile\", volunteer_id=related_volunteer.pk)\n else:\n return redirect(\"new-volunteer\")", "def index():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n return render_template('index.html')", "def test_home_page_redirect_when_loggin_attempt_successful(self):\n\t\tpass", "def home():\n\n form = HomePageForm()\n if form.validate_on_submit():\n return redirect(url_for('character'))\n return render_template('index.html', form=form)", "def index(request):\n\n\tif request.user.is_authenticated:\n\t\treturn HttpResponseRedirect('home')\n\treturn HttpResponseRedirect('login')", "def root(request):\n\ttemplate = 'bfbot/main'\n\treturn redirect(template)", "def entry_page():\n return redirect(url_for('index'))", "def redir_index():\n return redirect(url_for(\"index\"), code=301)", "def index():\n return redirect('/client/index.html')", "def view_home(self):\n with self.client.get(\"/home\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.failure(\"Not logged on: Got redirect to /login\")", "def home_view(request):\n if request.authenticated_userid:\n return HTTPFound(location=request.route_url('app_view')) # pragma no cover\n return {} # pragma no cover", "def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return handle_forbidden(self, request)", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))", "def get(self, request):\n return redirect('http://localhost:3000/')", "def index():\n return redirect(url_for('second_page'))", "def second_page():\n return redirect(url_for('index'))", "def landing():\n if g.user:\n return render_template('landing.html', user=g.user)\n return redirect(url_for('login'))", "def first_request():\n heroku_url: str = 'https://justice-ndou.herokuapp.com/'\n registered_domain: str = 'https://justice-ndou.herokuapp.com/'\n\n if request.host_url.lower().startswith(heroku_url):\n return redirect(request.host_url.lower().replace(heroku_url, registered_domain)), 301", "def index() -> str:\n return redirect('/students')", "def home(request):\n return render_to_response('index.html')", "def redirect_old_featured(page):\r\n return redirect(url_for('.index', page=page), 301)", "def home():\n logging.info('Entering route: HOME')\n\n logging.info('Rendering template: main.html')\n return render_template('main.html')", "def me():\n if g.USER:\n return redirect(url_for(\"profile\", username=g.USER.username))\n return redirect(url_for(\"home\"))", "def home(request):\n return render_to_response('home.html', {}, RequestContext(request))", "def home(request):\n today = datetime.date.today()\n return HttpResponseRedirect(\"%s/newsletter/%d/%d/%d/\" % (SUBSITE, today.year, today.month, today.day))", "def redirectPage() -> Response:\n # pass in the function name\n return 
redirect(url_for('view.loadMainPage'))", "def show_index():\r\n if 'username' in flask.session:\r\n return flask.redirect(flask.url_for('home')) # Need to fix redirect\r\n\r\n return flask.render_template(\"index.html\")", "def home():\n if request.method == \"GET\":\n return render_template(\"index.html\", result=False)\n\n # if there is post data, we have the form and need to encode the SQL\n # to pass to the results route.\n #\n # this encoding dance is to protect against the possibility of getting a very\n # long SQL string that breaks something in HTTP get.\n sql = request.form[\"sql\"]\n dialect = request.form[\"dialect\"]\n return redirect(\n url_for(\"routes.fluff_results\", sql=sql_encode(sql), dialect=dialect)\n )", "def home(request):\n if 'member_id' not in request.session:\n return redirect(\"/login/\")\n return render(request, 'esihapp/index1.html')", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def index(request):\n return redirect('polls:index')", "def redirect_dest(fallback):\n dest = request.args.get('next')\n try:\n if dest.startswith('/') or dest.startswith(request.host_url):\n return redirect(dest)\n dest_url = url_for(dest)\n except:\n return redirect(fallback)\n return redirect(dest_url)", "def login_redirect(request):\n return HttpResponseRedirect('/login')", "def _home(self, op, context):\n self.page = \"HOME\"\n return {'FINISHED'}", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def home(request):\n\treturn render(request, \"compta/home.html\")", "def redirect(self, path):\n self.get_controller().redirect(path)", "def catch_all(path):\n return redirect('/', code=302)", "def home(request):\r\n return render(request, 'home.html')", "def home():\n return response(\"OK\")", "def test_index_redirect(self):\n response = self.app.get(\"/\")\n self.assertEqual(response.status_code, 302,\n \"/ did not redirect to login when user is not logged in\")\n self.assertTrue(\n response.location.endswith(\"/accounts/login/\"),\n \"Redirect location did not end with /accounts/login/\"\n )", "def home():\n return render_template('home.html',\n face=session.get(app.config['SESSION_KEY'], None))", "async def index(request: Request, user: UserInfo) -> HTTPResponse:\n return redirect('home')", "def home(request):\n return render(request, 'home/index.html')", "def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))", "def home(request):\n if request.user.is_authenticated():\n return HttpResponse(\"{0} <a href='/accounts/logout'>exit</a>\".format(request.user))\n else:\n return HttpResponse(\"<a href='/login/vk-oauth2/'>login with VK</a>\")", "def goHome():\n\t#Go to pod home\n\tif screen.lastScreen in screen.protectedScreens:\n\t\tpodScreen.show()\n\telse:\n\t\tsplashScreen.show()", "def home():\n\n return render_template('home_page.html')", "def get(self):\n self.logout()\n self.redirect('/')", "def index():\n redirect(URL('form'))", "def home(result=None):\n print(inspect.stack()[1][3])\n\n if not session.get('logged_in') and not result:\n return render_template('login.html')\n else:\n # Based on the user_id passed, print Details, URLS and all.\n # return render_template('dashboard.html', 
username=result.name, user_id=result.user_type)\n return render_template('webpage/index1.html', username=result.name, user_id=result.user_type)", "def redirect(self, url):\n raise RequestRedirect(url)", "def index_page():\n\n return redirect(\"/application-form\")\n\n # Alternately, we could make this a Jinja template in `templates/`\n # and return that result of rendering this, like:\n #\n # return render_template(\"index.html\")", "def home(self, *args, **kwargs):\n pass", "def desktop_or_mobile(request):\n url_name = 'home.mobile' if request.MOBILE else 'home'\n return redirect_to(request, url_name, permanent=False)", "def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect", "def get(self):\n self.logout()\n self.redirect('/blog/login')", "def news0_redirect(request):\n return redirect('news:news', start_id='0')", "def home_page(request):\r\n return render(request, 'ez_main/home_page.html')", "def get_redirect_url(self):\n return reverse('accounts:home')", "def home():\n\n this_dir = os.path.dirname(__file__)\n home_file = app.config['HOME_PAGE_FOLDER'] + 'homepage.html'\n try:\n homepage = open(os.path.join(this_dir, home_file), \"r\").read()\n # Store configuration file values\n except FileNotFoundError:\n homepage = ''\n # Keep preset values\n\n resp = make_response(render_template(\"home.html\", page=\"index\",\n homedoc=homepage, err=request.args.get('err', default=None)))\n if request.cookies.get('lang') is None:\n lang = get_locale()\n resp.set_cookie('lang', lang)\n return resp", "def home(self):\n self.goto(0, 0)" ]
[ "0.8391641", "0.80959", "0.8045699", "0.8035703", "0.8035703", "0.78215", "0.7814849", "0.7806786", "0.75715935", "0.755005", "0.755005", "0.755005", "0.745654", "0.7446087", "0.73788244", "0.7322582", "0.73209256", "0.7203898", "0.71697754", "0.71356803", "0.7109369", "0.7109197", "0.7078182", "0.7071943", "0.7051265", "0.7048573", "0.69983816", "0.69805485", "0.691623", "0.6914428", "0.69098914", "0.6878491", "0.6875465", "0.6859419", "0.68276894", "0.68254775", "0.67906827", "0.6789351", "0.67627335", "0.67348355", "0.67242616", "0.670553", "0.6696331", "0.66711575", "0.6667568", "0.664376", "0.66424817", "0.66147786", "0.6609697", "0.6568546", "0.6562735", "0.65593284", "0.6520643", "0.6512826", "0.6507448", "0.6503651", "0.6502355", "0.6442668", "0.6420771", "0.64037657", "0.6385458", "0.63704425", "0.636354", "0.63415796", "0.6326086", "0.6314376", "0.6293907", "0.629073", "0.6283342", "0.627734", "0.6276999", "0.62721324", "0.62721324", "0.62719357", "0.62595683", "0.62586975", "0.6248872", "0.62421864", "0.62096876", "0.62063026", "0.620317", "0.61864406", "0.6183989", "0.617825", "0.61740994", "0.6172349", "0.61690885", "0.61630625", "0.6161938", "0.61366916", "0.61341584", "0.61209047", "0.61110336", "0.61059684", "0.6104767", "0.6099633", "0.6096979", "0.6089853", "0.6084587", "0.60762095" ]
0.77726823
8
Renders the index template
def home(): this_dir = os.path.dirname(__file__) home_file = app.config['HOME_PAGE_FOLDER'] + 'homepage.html' try: homepage = open(os.path.join(this_dir, home_file), "r").read() # Store configuration file values except FileNotFoundError: homepage = '' # Keep preset values resp = make_response(render_template("home.html", page="index", homedoc=homepage, err=request.args.get('err', default=None))) if request.cookies.get('lang') is None: lang = get_locale() resp.set_cookie('lang', lang) return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self):\n\t\treturn render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index(self):\n return render_template('main/index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('0-index.html')", "def index(self) -> HTMLBody:\n\t\treturn render_template(\"index.jinja2\")", "def index():\n return render_template(\"index.html\",\n title='Index')", "def index():\r\n return render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index():\n\n\treturn(render_template('index.html'))", "def index():\n # Render template\n return render_template('index.html')", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template(\"index.html\")", "def index():\n \n \n return render_template('index.html')", "def get_index():\n return render_template('index.html')", "def index():\n\n return render_template(\"index.html\"), 
200", "def index():\n return render_template(\"index.html\", **locals())", "def index(request):\r\n\treturn render(request, 'templates/index.html')", "def index_page():\n\n return render_template(\"index.html\")", "def index_page():\n\n return render_template(\"index.html\")", "def index_page():\n\n return render_template(\"index.html\")", "def index():\n return render_template('index.html'), 200", "def index():\n\treturn render_template(\"index.html\", title=\"Home\")", "def index():\n return render_template('index.html', title='Home')", "def index():\n return render_template(\"index.html\", page_title=\"Home\")", "def index_page():\n \n return render_template(\"index.html\")", "def show_index_page():\n\n return render_template('index.html')", "def index():\n return render_template('pages/index.html', isNav=True)", "def index(self):\n\n return render_template(\n 'home_page/index.html',\n **locals()\n )", "def index(self):\n return self.render(\"admin/index.html\")", "def index():\n return render_template('index.html', getName=ASK_NAME)", "def index():\n # Renders the template (see the index.html template file for details). The\n # additional defines at the end (table, header, username) are the variables\n # handed to Jinja while it is processing the template.\n return render_template('index.html', table=table, header=header,\n username=username())", "def index():\n\treturn render_template('public/index.html', title='Home')", "def index(self):\n return render(\"/derived/rock/index.mako\")", "def index(request):\n\treturn render(request, 'Toeic/index.html')", "def todos_index_page():\n return render_template(\n template_name_or_list='index.html',\n todos=Todo.index(mongo.db))", "def get(self):\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\", content=songs, titles=titles, contentO=songsO, titlesO=titlesO)", "def index() -> Any:\n return render_template(\"index.html\")", "def index(request):\n # Render the HTML template index.html\n return render(\n request,\n 'index.html',\n )", "def index():\n today = datetime.today()\n return render_template(\"index.html.j2\", today=today)", "def index():\n return render_template('index.html', name=urlpath)", "def get(self):\n self.render(\"index.html\")", "def index():\n return render_template(\n 'index.html',\n nav=nav,\n title='Home Page',\n year=datetime.now().year\n )", "def index():\n return render_template(\n 'index_t.html',\n call_counter=str(get_model().call_counter),\n app_version=str(app.config.get('GIT_HASH', None))\n )", "def index():\n return render_template(\n 'main/index.html',\n title='Main page'\n )", "def index_template(self):\n return '{}/{}.html'.format(self.object_name, self.index_endpoint)", "def get(self):\n self.render('index.html')\n return", "def index(self):\n \n return self.view.render('index.html', {\"posts\"=posts})", "def creerIndex(request):\n context = {}\n return render(request, 'index.html', context)", "def index_view(self) -> str:\n return render_template(\n \"index.html\",\n challenge_groups=self._challenge_groups,\n ctff=current_app,\n )", "def index():\n return render_template(\"main.html\")", "def index():\n return render_template('main.html')", "def index():\n return render_template('main.html')", "def index(self):\n return render_template(\"{0}/index.html\".format(self.__APP_DIR__))" ]
[ "0.8663074", "0.8416551", "0.8395232", "0.8383464", "0.83140826", "0.83140826", "0.8239054", "0.82325274", "0.8220168", "0.82151866", "0.82151866", "0.8183771", "0.8143425", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.808803", "0.8078401", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8066346", "0.8036096", "0.8018531", "0.79757094", "0.79545426", "0.79398835", "0.7898768", "0.78937066", "0.78937066", "0.78937066", "0.7884277", "0.7883415", "0.7868813", "0.7788855", "0.7773321", "0.77644485", "0.7761943", "0.7729839", "0.7709832", "0.76929057", "0.7666915", "0.76645356", "0.7662625", "0.7655997", "0.764621", "0.7639892", "0.7603384", "0.7601745", "0.7579957", "0.7555739", "0.7524336", "0.75177985", "0.75128245", "0.7512606", "0.7506056", "0.74971163", "0.7495231", "0.74629784", "0.7454057", "0.7445256", "0.7440252", "0.7429397", "0.7429397", "0.73960966" ]
0.0
-1
Function to modify the homepage
def modify_homepage(): this_dir = os.path.dirname(__file__) home_file = app.config['HOME_PAGE_FOLDER'] + 'homepage.html' value = "" if request.form.get("newHome"): value = request.form.get("newHome") if not os.path.isdir(os.path.join(this_dir, app.config['HOME_PAGE_FOLDER'])): os.mkdir(os.path.join(this_dir, app.config['HOME_PAGE_FOLDER'])) homeFile= open(os.path.join(this_dir, home_file), "w+") homeFile.write(value) homeFile.flush() files = request.files.getlist("Attachments") for file in files: print(file.filename, file=sys.stdout) nameFile = secure_filename(file.filename) file.save(os.path.join(this_dir, app.config['HOME_PAGE_FOLDER'], nameFile)) return jsonify(result=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def homepage():\n\treturn render_template(\"home/a_homepage.html\", title=\"Welcome\")", "def homepage():\n\n return render_template('rosemead_homepage.html')", "def homepage():\n return render_template('home/index.html', \n title=\"Bem vindo!\")", "def a_homepage():\n\n\treturn render_template('home/a_homepage.html', title=\"Homepage Admin\")", "def homepage():\n return render_template('home/index.html', title=\"Home\")", "def homepage():\n return render_template('homepage.html')", "def home() -> Any:\n return home_page()", "def homepage(request):\n\treturn render(request, 'core/homepage.html')", "def homepage():\n return render_template('home/index.html', title=\"Welcome\")", "def homepage():\n return render_template('home/index.html', title=\"Welcome\")", "def homepage():\n return {'sample': 'ADAL'}", "def home(self, *args, **kwargs):\n pass", "def homepage():\n return render_template(\"home/index.html\", title=\"Welcome\")", "def homepage():\n return render_template(\"home/index.html\")", "def home():\n return render_template('homepage.html')", "def homepage(): \n\n return render_template(\"home_map.html\")", "def home():\n\n\treturn render_template('solai.html')", "def homepage():\n\n general_news=get_sources('general')\n business_news=get_sources('business')\n sports_news=get_sources('sports')\n return render_template('sources.html',general=general_news, business=business_news, sports=sports_news)", "def homepage(request):\n return render_to_response('h1ds_core/homepage.html', \n context_instance=RequestContext(request))", "def home():\n\n\treturn render_template('index.html', title='Home Page',\n\t\t\t\t\t\t year=datetime.now().year)", "def homepage():\r\n print(__name__ + \" invoked\")", "def home():\n\n return render_template('home_page.html')", "def home_page(request):\r\n return render(request, 'ez_main/home_page.html')", "def home():\n return render_template('home.html', title=\"Home\")", "def home():\n return render_template(\n 'home.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\r\n return render_template(\r\n 'index.html',\r\n title='Home Page',\r\n year=datetime.now().year,\r\n )", "def home():\n return render_template(\n 'index.html',\n title='Sairam sai baba',\n year=datetime.now().year,\n )", "def home():\n return render_template('Main_Page.html')", "def homepage(request):\n \n return render(request, 'homepage.html')", "def home_page() -> str:\r\n return render_template(\"home.html\")", "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'AscensionESports_Baseline/index.html',\n {\n 'background': getSiteBackground(),\n 'color': getSiteColor(),\n 'title':'Home Page',\n 'year':datetime.now().year,\n }\n )", "def home():\n\n # sets the page to load depending on the type of user\n # if none specified the login screen will be displayed\n pageName = ''\n userType = session.get('UserType', None)\n if userType == None:\n pageName = 'anonHome.jade'\n elif userType == 'Seeker':\n pageName = 'indexJob.jade'\n elif userType == 'Manager':\n pageName = 'indexManager.jade'\n\n frogHop = url_for('static', filename='loop frog.gif')\n uName = session.get('UserName', 'Unknown') # load a default value if retrieval fails\n return render_template(\n pageName,\n title='Home',\n name=uName,\n getFrog=frogHop,\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home 
Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year\n )", "def homepage():\n return redirect('index.html')", "def home():\n return \"<h1>Not Much Going On Here</h1>\"", "def home():\n template_stories = [\"silly_story\", \"excited_story\"]\n\n return render_template(\"homepage.html\", story_list = template_stories)", "def homepage(): \n return render_template('home/index.html',title='Welcome to SYCLIQ')\n #return render_template_string(\"Welcome to SYCLIQ\")", "def home():\n return render_template('index.html',\n title='主页',\n year=datetime.now().year,)", "def homepage():\n \n return render_template(\"coursePage.html\",courseName = \"Welcome\", Courses = COURSES, blank = 1)", "def _home(self, op, context):\n self.page = \"HOME\"\n return {'FINISHED'}", "def show_homepage():\n\n return render_template(\"blank-slate.html\")", "def home():\n return render_template('main.html')", "def menu_spe_homepage(self, event=None):\n self.link('http://pythonide.stani.be')", "def home():\n tc = totalclicks()\n tl1, tl2, tl3 = topthreelinks()\n bl1, bl2, bl3 = topblomoedlinks()\n return flask.render_template('home.html', tc=tc, tl1=tl1, tl2=tl2, tl3=tl3, bl1=bl1, bl2=bl2, bl3=bl3)", "def home():\n\n return render_template(\"home.html\")", "def home(request):\n\treturn render(request, \"compta/home.html\")", "def get_homepage():\n\n projects = hackbright.get_projects()\n students = hackbright.get_students()\n\n return render_template(\"homepage.html\",\n students=students,\n projects=projects)", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def home():\n return render_template('home.html')", "def get_home():\n return render_template(\"home.html\")", "def main_nav():\n return render_template('home.html')", "def home():\n return render_template(\"home.html\")", "def home():\n return render_template(\"home.html\")", "def 
home_page(wagtail_site):\n return wagtail_site.root_page", "def render_home():\r\n\treturn render_template(\"index.html\")", "def home_page():\n\n return render_template('index.html')", "def homepage():\n return (\n f\"Welcome to Hawaii - Climate Page<br/>\"\n f\"<br/>\"\n f\"This site has data from 01-01-2010 to 08-23-2017<br/>\"\n f\"<br/>\"\n f\"Available Pages:<br/>\"\n f\"<br/>\"\n f\"<br/>\"\n f\" Station Information<br/>\"\n f\" /api/v1.0/stations<br/>\"\n f\"<br/>\"\n f\" Percipitation Information<br/>\"\n f\" /api/v1.0/percipitation<br/>\"\n f\"<br/>\"\n f\" Temperature Observations<br/>\"\n f\" /api/v1.0/tobs<br/>\"\n f\"<br/>\"\n f\" Start Date information - complete url is '/api/v1.0//yyyy-mm-dd'<br/>\"\n f\" /api/v1.0/start<br/>\"\n f\"<br/>\"\n f\" Start and End Date information - complete url is '/api/v1.0/yyyy-mm-dd/yyyy-mm-dd'<br/>\"\n f\" /api/v1.0/start/end\"\n )", "def home(request):\n return render_template('core/home.html')", "def home_page():\n return \"<h4>Welcome !</h4><br><a href='/fetch'>View Results</a>\"", "def homepage():\n return redirect(\"/posts\")", "def initialPage():\n\treturn header() + footer()", "def make_navbar_for_homepage(self):\n links = [\n \"home\", [\"Result Pages\", self._result_page_links()], \"Version\"\n ]\n if len(self.samples) > 1:\n links[1][1] += [\"Comparison\"]\n if self.publication:\n links.insert(2, \"Publication\")\n if self.gwdata is not None:\n links.append([\"Detchar\", [i for i in self.gwdata.keys()]])\n if self.notes is not None:\n links.append(\"Notes\")\n return links", "def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")", "def home():\n return render_template('index.html', year=datetime.now().year)", "def home_page():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n )", "def menu_python_homepage(self, event=None):\n self.link('http://www.python.org')", "def home_page():\n\n return render_template('index.html', stories=stories.values())", "def go_home(self):\r\n if self.home_url is not None:\r\n self.set_url(self.home_url)", "def menu_authors_homepage(self, event=None):\n self.link('http://www.stani.be')", "def _set_homepage_channel(self):\n Channel = get_model('channels', 'Channel')\n\n channel = Channel.objects.all()[0]\n channel.homepage = True\n channel.save()", "def show_homepage():\n\n pets = Pet.query.all()\n rando_pet = get_info_random_pet()\n\n name = rando_pet['petfinder']['pet']['name']['$t']\n age = rando_pet['petfinder']['pet']['age']['$t']\n image = rando_pet['petfinder']['pet']['media']['photos']['photo'][0]['$t']\n\n return render_template('homepage.html', pets=pets,\n name=name, age=age, image=image)", "def home():\n\n return render_template('index.html',\n date = d,\n mood = mood,\n header_color = color,\n day = day,\n week = week,\n month = month,\n year = year)", "def home():\n\n return render_template('index.html')", "def home():\n return render_template(\n 'index.html',\n year=datetime.now().year,\n )", "def home():\n payload = manager.get_payload()\n return render_template('index.html', payload=payload)", "def visit_homepage(self) -> None:\n if self.home_page is not None:\n webbrowser.open(self.home_page)", "def homepage():\r\n words = story.prompts\r\n # i didn't realize you could access class variables like this\r\n\r\n return render_template(\"homepage.html\", words = words)" ]
[ "0.7403166", "0.7383433", "0.7320676", "0.730273", "0.722838", "0.722588", "0.7190989", "0.71861947", "0.7157216", "0.7157216", "0.71296996", "0.71152425", "0.7102618", "0.7062718", "0.7043081", "0.6962566", "0.69548315", "0.6911093", "0.69107646", "0.6907873", "0.6875526", "0.68727916", "0.68413585", "0.68218935", "0.6816311", "0.6785617", "0.67731136", "0.67669773", "0.6755017", "0.67434895", "0.6737884", "0.6731376", "0.67027783", "0.67027783", "0.67027783", "0.67027783", "0.67027783", "0.67027783", "0.67027783", "0.67027783", "0.67027783", "0.67027783", "0.6697248", "0.6686785", "0.668297", "0.6679709", "0.66755235", "0.6661096", "0.6654625", "0.6649813", "0.66496956", "0.66454643", "0.6640424", "0.6637764", "0.66321385", "0.66299945", "0.6624038", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6622609", "0.6615179", "0.6615046", "0.6613337", "0.6613337", "0.66125184", "0.6607648", "0.66014564", "0.6580962", "0.65666336", "0.6566127", "0.65566796", "0.65520644", "0.65440536", "0.6538731", "0.65202475", "0.6515344", "0.65102", "0.64924586", "0.64886963", "0.6464244", "0.6443004", "0.6425637", "0.64253145", "0.64212465", "0.6415917", "0.64131427", "0.640401", "0.6400053" ]
0.0
-1
Generate an identity key pair. Clients should only do this once, at install time. the generated IdentityKeyPair.
def generateIdentityKeyPair(): keyPair = Curve.generateKeyPair() publicKey = IdentityKey(keyPair.getPublicKey()) serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \ 'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \ 'edfbcd82129b14a88791ac81365c' serialized = binascii.unhexlify(serialized.encode()) identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey()) return identityKeyPair # return IdentityKeyPair(serialized=serialized)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_key_pair(self, keysize, cb):\n\n def gen_key_pair_pub_cb(data, ctx):\n if not data:\n warning('keymanagement: Could not generate a key pair\\n')\n cb(None, None)\n else:\n cb(ctx, data)\n\n def gen_key_pair_priv_cb(data, ctx):\n if not data:\n warning('keymanagement: Could not generate a key pair\\n')\n cb(None, None)\n else:\n xrun([self.sslname, 'rsa', '-pubout'], gen_key_pair_pub_cb,\n data, data)\n\n return xrun([self.sslname, 'genrsa', str(keysize)],\n gen_key_pair_priv_cb, None)", "def gen_key_pair():\n sk = gen_secret_key(BITCOIN.gen.n)\n pk = PublicKey.from_sk(sk)\n return sk, pk", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def generate_symmetric_key():\n return Fernet.generate_key()", "def generate_rsa_key_pair(self):\n\t\tprint \"Started rsa key generation\"\n\t\tkey = RSA.generate(self.key_size, randfunc=self.random_number_generator)\n\t\t\t\n\t\tpub_key = key.publickey().exportKey()\n\t\tprint pub_key\n\t\t\n\n\t\tpriv_key = key.exportKey()\n\t\tprint \"Private key\", priv_key \n\t\tprint \"Note: Normally, the private key should be protected. For the purposes of this demo, I'm printing it to terminal.\"", "def generate_key(self, **options):\n\n return security_utils_services.generate_rsa_key(**options)", "def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])", "def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)", "def generate_signing_keys():\n return SigningKey.generate(curve=SECP256k1)", "def create_key_pair(self, keypair, **kwargs):\n\n if not isinstance(keypair, models.CreateKeyPairReq):\n raise HuaweiCloudSDKException(\n message=\"The datatype of parameter(keypair) \"\n \"is not CreateKeyPairReq\")\n body_params = keypair.serialize()\n\n header_params = {}\n header_params['Accept'] = util.select_header_accept(\n ['application/xml', 'application/json'])\n\n header_params['Content-Type'] = util.select_header_content_type(\n ['application/json', 'application/xml'])\n\n return_code, return_data, _ = self.api_client.handle_raw_request(\n 'compute', 'POST',\n '/os-keypairs',\n headers=header_params,\n body=body_params,\n timeout=kwargs.get('_request_timeout', None),\n _preload_content=kwargs.get('_preload_content', True))\n\n if return_data is not None:\n return_data = json.loads(return_data)\n else:\n return_data = {}\n if return_code not in [200, 201]:\n raise HuaweiCloudSDKException(\n return_code,\n \"Run create_key_pair failed, \"\n \"message=%s\" % return_data.get(\"message\"))\n return models.CreateKeyPairResp().deserialize(return_data)", "def generate_key(self):\n key = rsa.generate_private_key(\n public_exponent=self.settings['key_public_exponent_size'],\n key_size=self.settings['key_size'],\n backend=default_backend()\n 
)\n return key", "def gen_key(self):\n\n if not self.private_key:\n self._gen_key()\n else:\n raise CryptoError(\"Private Key already existing\")", "def generate_key():\n key = crypto.Key.generate_key()\n click.echo('Private Key (len {}):: \\n{}'.format(\n len(key.get_privkey()),\n hexlify(key.get_privkey())))\n click.echo('Public Key (len {})::\\n{}'.format(\n len(key.get_pubkey()),\n hexlify(key.get_pubkey())))", "def test_generate_key_pair(self):\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_key = Mock()\n mock_key.fingerprint = 'fake-fingerprint'\n mock_gpg.gen_key.return_value = mock_key\n\n mock_gpg.return_value = mock_gpg\n encryptor = self.test_init()\n fake_key = encryptor.generate_key_pair(key_type=\"RSA\", length=4096, options={\n 'name_real': 'Fake Name', 'name_email': '[email protected]', 'name_comment': 'Fake comment'})\n\n self.assertEqual(mock_gpg.gen_key_input.call_count, 1)\n self.assertEqual(fake_key, mock_key.fingerprint)", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def _gen_key(self):\n\n input_data = self._gpg.gen_key_input(key_type=\"RSA\",\n key_length=self.key_length, name_real=self.name,\n name_comment=self.comment, name_email=self.email)\n\n log.info(\"Generating key: (%s)\" % input_data)\n\n self.key = self._gpg.gen_key(input_data)", "def generate(self, force=False):\n if not self.check_force_generate(force):\n return False\n\n mkdirs(self.path)\n\n command = [openssl, 'ecparam', '-genkey', '-name', self.asn1_oid, '-out', self.key_file]\n\n self.log.info('Generating EC key')\n # Generate the keyfile with no password\n if not run_command(command):\n raise RuntimeError('EC key generation failed', self)\n\n # Now encrypt the key with a password, overwriting the original\n # passwordless key.\n if self.password:\n command = [\n openssl, 'ec',\n '-in', self.key_file,\n '-out', self.key_file,\n '-des3', '-passout', 'pass:{}'.format(self.password)\n ]\n self.log.info('Encrypting key with password')\n\n if not run_command(command):\n raise RuntimeError('EC key file password encryption failed')\n\n if not self.exists():\n raise RuntimeError(\n 'Key generation succeeded but key file does not exist. 
'\n 'This should not happen', self\n )", "def createKeyPair(type, bits):\n pkey = crypto.PKey()\n pkey.generate_key(type, bits)\n return pkey", "def create_key ():", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def new_key_pair(self):\n from plonevotecryptolib.KeyPair import KeyPair # avoids circular imports\n return KeyPair(self)", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def generate_ecdh_key_pair() -> tuple[X25519PrivateKey, bytes]:\n private_key = X25519PrivateKey.generate()\n public_key_raw = private_key.public_key().public_bytes(\n serialization.Encoding.Raw, serialization.PublicFormat.Raw\n )\n return private_key, public_key_raw", "def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))", "def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def generate_keypair(self, key_length: int = 2048) -> Tuple[bytes, bytes]:\n\n return None", "def generate_new_key(self, index):\n new_key = self.chain_key.subkey(index)\n 
self._key_generated(new_key, index)", "def create_key_pair(self, key_name):\r\n params = {'KeyName':key_name}\r\n return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def gen_temp_key(self, keysize=1024):\n self.temp_session_key = [None, None]\n self.key_exchange_gui.generating_temp_key()\n return self.gen_key_pair(keysize, self.gen_temp_key_cb)", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' % self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile", "def generate_key():\n return get_token_generator().generate_token()", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def GenerateKey(self):\n self.key_name = self.key_name or str(uuid.uuid4())\n if self.key is None or not self.key.id():\n self.key = ndb.Key(self._get_kind(), self.key_name)\n return True\n return False", "def generate_key_image(\n self, output: OutputInfo, private_view_key: bytes, private_spend_key: bytes\n ) -> bytes:", "def generate_key_pair(G):\r\n\r\n global random\r\n\r\n if random == None:\r\n random = hash_drbg.HashDRBG()\r\n\r\n if G.order == None:\r\n raise RuntimeError(\"Base point must have order.\")\r\n\r\n key_size = log(ec.leftmost_bit(G.order)) / log(2)\r\n key_size = int(ceil(key_size) / 2)\r\n private_key = 1\r\n\r\n while private_key <= 1:\r\n private_key = random(key_size) #generates a random number\r\n #with twice the 
required bits\r\n private_key %= G.order\r\n\r\n return (private_key, G * private_key)", "def generate(cls, params = None, quiet = False):\n\n if params is None:\n if not quiet:\n logger.debug(\"Generating new ECDSA key parameters\")\n params = KeyParams.generateEC()\n\n assert isinstance(params, KeyParams)\n\n if not quiet:\n logger.debug(\"Generating new ECDSA key\")\n\n return cls(POW = rpki.POW.Asymmetric.generateFromParams(params.get_POW()))", "def genKey(self, privateKey,otherKey):\n\t\tself.sharedSecret = self.genSecret(privateKey, otherKey)\n\n\t\t# Convert the shared secret (int) to an array of bytes in network order\n\t\t# Otherwise hashlib can't hash it.\n\t\ttry:\n\t\t\t_sharedSecretBytes = self.sharedSecret.to_bytes(\n\t\t\t\tself.sharedSecret.bit_length() // 8 + 1, byteorder=\"big\")\n\t\texcept AttributeError:\n\t\t\t_sharedSecretBytes = str(self.sharedSecret)\n\n\t\ts = hashlib.sha256()\n\t\ts.update(bytes(_sharedSecretBytes))\n\t\tself.key = s.digest()", "def generate(self, force=False):\n raise NotImplementedError(\n 'Cannot generate Key of unknown algorithm type. Use a subclass.', self\n )", "def generate_key(self):\n return str(uuid4())", "def get_identity_shared_key(self, identity, curve, their_pubkey, index=0):\n params = {'identity': identity, 'curve': curve, 'index': index,\n 'their_pubkey': their_pubkey}\n return self._jadeRpc('get_identity_shared_key', params)", "def generate_keys(self, password):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def create_key_pair(self, key_name):\n response = key_pair.create_key_pair(self.url, self.verb, self.headers,\n self.version, key_name)\n if response is not None :\n res = CreateKeyPairResponse.CreateKeyPairResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def generate_input_key(\n self, output: OutputInfo, private_view_key: bytes, private_spend_key: bytes\n ) -> bytes:", "def ex_create_keypair(self, name):\n params = {\n 'Action': 'CreateKeyPair',\n 'KeyName': name,\n }\n response = self.connection.request(self.path, params=params).object\n key_material = self._findtext(response, 'keyMaterial')\n key_fingerprint = self._findtext(response, 'keyFingerprint')\n return {\n 'keyMaterial': key_material,\n 'keyFingerprint': key_fingerprint,\n }", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def gen_private_key():\n return DH.b2i(Random.new().read(DH_SIZE))", "def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = '/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']", "def generate_key():\r\n # 
generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def sym_key_gen(pairing_group=None, sym_key_size=None, debug=0):\n\n # If sym_key_size is not defined, set a default value\n if sym_key_size is None:\n sym_key_size = SYM_KEY_DEFAULT_SIZE\n\n # Clamp the size between SYM_KEY_MIN_SIZE and the system maximum possible value\n size = clamp(sym_key_size, SYM_KEY_MIN_SIZE, sys.maxsize)\n\n # Check if an error occurred during clamping\n if size is None:\n logging.error('sym_key_gen clamp size exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in sym_key_gen clamp size')\n raise Exception\n\n # Check if size is a power of 2\n if not math.log2(size).is_integer():\n logging.error('sym_key_gen size exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in sym_key_gen size')\n raise Exception\n\n # Generate and return a random symmetric key with the given size\n return random_string_gen(pairing_group, sym_key_size)", "def gen_public_key(n, e):\n\n # Assign key parameters\n key_params = (n, e)\n # Construct private key\n key = RSA.construct(key_params)\n\n return key.exportKey()", "def generate_key():\n return str(uuid.uuid4())", "def vsce_uokms_server_generate_server_key_pair(self, ctx, server_private_key, server_public_key):\n vsce_uokms_server_generate_server_key_pair = self._lib.vsce_uokms_server_generate_server_key_pair\n vsce_uokms_server_generate_server_key_pair.argtypes = [POINTER(vsce_uokms_server_t), POINTER(vsc_buffer_t), POINTER(vsc_buffer_t)]\n vsce_uokms_server_generate_server_key_pair.restype = c_int\n return vsce_uokms_server_generate_server_key_pair(ctx, server_private_key, server_public_key)", "def generate_keypair(bits):\n p = generate_prime(bits // 2)\n #print(p)\n q = generate_prime(bits // 2)\n #print(q)\n n = p * q\n return PrivateKey(p, q, n), PublicKey(n)", "def generate_keystream(self):", "def gen_key(app):\n\tos.system('lxc-attach -n %s -- ssh-keygen -t rsa -N \"\" -f key' % app)", "def generate(cls, keylength = 2048, quiet = False):\n\n if not quiet:\n logger.debug(\"Generating new %d-bit RSA key\", keylength)\n if generate_insecure_debug_only_rsa_key is not None:\n return cls(POW = generate_insecure_debug_only_rsa_key())\n else:\n return cls(POW = rpki.POW.Asymmetric.generateRSA(keylength))", "def generate_key(seed):\n private_key = sha256(seed)\n public_key = privtopub(private_key)\n return {\"private\": private_key, \"public\": public_key}", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))", "def create_pem_keys(self):\n self.random_rsa()\n\n return self.keys", "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def create_handshake_key_pair(cls) -> Tuple[bytes, bytes]:\n ...", "def generate_private_key(self):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key generation.\")\n return 
self.load_private_key(SigningKey.generate(curve=self.curve))", "def get_new_key() -> rsa.RSAPrivateKeyWithSerialization:\n\n return rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048\n )", "def generate_keys(attributes: List[AttributeName]) -> Tuple[SecretKey, PublicKey]:\n # Pick uniformly random variables\n x = G1.order().random()\n y = {a: G1.order().random() for a in attributes}\n\n # take generators of G1 and G2\n g1 = G1.generator()\n g2 = G2.generator()\n\n # Compute Xs and Ys\n X1 = g1 ** x\n X2 = g2 ** x\n Y1 = {a: g1 ** y_i for a, y_i in y.items()}\n Y2 = {a: g2 ** y_i for a, y_i in y.items()}\n\n # Output public and secret keys\n pk = PublicKey(attributes, g1, Y1, g2, X2, Y2) # type:ignore\n sk = SecretKey(x, X1, y)\n return sk, pk", "def generate_key(self):\n cmd = self.generate_key_cmd()\n self.show(cmd)\n if self.dryrun:\n return None\n s, _, _ = self.as_user(cmd)\n assert s == 0, ('failed to generate key', cmd)\n keyname = self.extract_key_name()\n return keyname", "def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)", "def generate_key_and_cert():\n signing_key = rsa.generate_private_key(backend=crypto_default_backend(), public_exponent=65537, key_size=2048)\n subject = issuer = x509.Name(\n [\n x509.NameAttribute(NameOID.COUNTRY_NAME, 'NO'),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'OSLO'),\n x509.NameAttribute(NameOID.LOCALITY_NAME, 'OSLO'),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'Intility AS'),\n x509.NameAttribute(NameOID.COMMON_NAME, 'intility.no'),\n ]\n )\n signing_cert = (\n x509.CertificateBuilder()\n .subject_name(subject)\n .issuer_name(issuer)\n .public_key(signing_key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(datetime.utcnow())\n .not_valid_after(\n # Our certificate will be valid for 10 days\n datetime.utcnow()\n + timedelta(days=10)\n # Sign our certificate with our private key\n )\n .sign(signing_key, hashes.SHA256(), crypto_default_backend())\n .public_bytes(crypto_serialization.Encoding.DER)\n )\n return signing_key, signing_cert", "def create_rsa_key_pair() -> Tuple[str, str]:\n key = RSA.generate(RSA_KEY_STRENGTH)\n public_key = key.publickey().export_key().decode()\n private_key = key.export_key().decode()\n return public_key, private_key", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def create_keypair(self, name=None, public_key=None):\n body = {}\n if name is not None:\n body.update({\"name\": name})\n if public_key is not None:\n body.update({\"public_key\": public_key})\n return self._create(_keypair.Keypair, **body)", "def generate_rsa_auxiliary_key_pair() -> AuxiliaryKeyPair:\n rsa_key_pair = rsa_keypair()\n return AuxiliaryKeyPair(rsa_key_pair.private_key, rsa_key_pair.public_key)", "def gnupg_keypair(\n gnupg_gen_key_conf: Path, gnupg_trust_store: GnuPGTrustStore\n) -> GnuPGKeypair:\n\n LOGGER.debug(\"Initializing GPG keypair ...\")\n environment = {\"HOME\": \"/dev/null\"}\n result = subprocess.run(\n [\n \"gpg\",\n \"--batch\",\n \"--homedir\",\n str(gnupg_trust_store.gnupg_home),\n \"--gen-key\",\n \"--keyid-format\",\n \"long\",\n str(gnupg_gen_key_conf),\n ],\n capture_output=True,\n check=True,\n env=environment,\n )\n keyid = re.findall(\n r\"gpg: key (\\w+) marked as ultimately trusted\", result.stderr.decode(\"utf-8\")\n )[0]\n # LOGGER.debug(\" keyid : %s\", keyid)\n\n result = 
subprocess.run(\n [\n \"gpg\",\n \"--fingerprint\",\n \"--fingerprint\", # Double --fingerprint needed for subkeys\n \"--homedir\",\n str(gnupg_trust_store.gnupg_home),\n \"--with-colons\",\n str(keyid),\n ],\n capture_output=True,\n check=True,\n env=environment,\n )\n # Fingerprint order: pubkey [, subkey ]...\n fingerprints = re.findall(r\"fpr:{9}(\\w+):\", result.stdout.decode(\"utf-8\"))\n LOGGER.debug(\" Fingerprints:\")\n for fingerprint in fingerprints:\n LOGGER.debug(\" %s\", fingerprint)\n\n yield GnuPGKeypair(\n fingerprints=fingerprints,\n gen_key_conf=gnupg_gen_key_conf,\n gnupg_home=gnupg_trust_store.gnupg_home,\n keyid=keyid,\n )", "def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)", "def _create_pkey(self, commonname, serial):\n pkey = PKey()\n pkey.generate_key(crypto.TYPE_RSA, self.key_bits)\n private = crypto.dump_privatekey(crypto.FILETYPE_PEM,\n pkey).decode()\n key_path = self._get_key_path(commonname, serial)\n if os.path.exists(key_path):\n raise FileExistsError(key_path)\n with open(key_path, 'w') as private_file:\n private_file.writelines(private)\n\n key_link = self._get_key_link(commonname)\n if os.path.exists(key_link):\n os.unlink(key_link)\n os.symlink(os.path.basename(key_path), key_link)\n\n return pkey", "def keygen(\n args: argparse.Namespace,\n config: KSKMConfig,\n p11modules: KSKM_P11,\n logger: logging.Logger,\n) -> bool:\n logger.info(\"Generate key\")\n flags = FlagsDNSKEY.ZONE.value | FlagsDNSKEY.SEP.value\n dnssec_alg = AlgorithmDNSSEC[args.key_alg]\n if is_algorithm_rsa(dnssec_alg):\n if args.key_size is None:\n raise argparse.ArgumentError(\n args.key_size, \"RSA key generation requires key size\"\n )\n p11key = generate_rsa_key(\n flags, args.key_size, p11modules, label=args.key_label\n )\n elif is_algorithm_ecdsa(dnssec_alg):\n crv = algorithm_to_curve(dnssec_alg)\n p11key = generate_ec_key(flags, crv, p11modules, label=args.key_label)\n else:\n raise ValueError(f\"Unknown key algorithm {repr(args.key_alg)}\")\n\n if not p11key or not p11key.public_key:\n raise RuntimeError(\"No public key returned by key generation\")\n\n # Calculate the DNSSEC key tag of the new key and look for a collision in the configuration\n key_tags: List[int] = []\n _key = public_key_to_dnssec_key(\n key=p11key.public_key,\n key_identifier=p11key.label,\n algorithm=AlgorithmDNSSEC[args.key_alg],\n flags=FlagsDNSKEY.SEP.value | FlagsDNSKEY.ZONE.value,\n ttl=config.ksk_policy.ttl,\n )\n logger.info(\n f\"Generated key {p11key.label} has key tag {_key.key_tag} for algorithm={_key.algorithm}, \"\n f\"flags=0x{_key.flags:x}\"\n )\n key_tags += [_key.key_tag]\n _revoked_key = public_key_to_dnssec_key(\n key=p11key.public_key,\n key_identifier=p11key.label,\n algorithm=AlgorithmDNSSEC[args.key_alg],\n flags=FlagsDNSKEY.SEP.value | FlagsDNSKEY.ZONE.value | FlagsDNSKEY.REVOKE.value,\n ttl=config.ksk_policy.ttl,\n )\n logger.info(\n f\"Generated key {p11key.label} has key tag {_revoked_key.key_tag} with the REVOKE bit set \"\n f\"(flags 0x{_revoked_key.flags:x})\"\n )\n key_tags += [_revoked_key.key_tag]\n\n for _name, ksk in config.ksk_keys.items():\n if ksk.key_tag in key_tags:\n logger.error(\n f\"Generated key {p11key.label} has key tags {key_tags} matching \"\n f\"KSK key in configuration: {ksk}\"\n )\n raise RuntimeError(\"Key tag collision detected\")\n\n _now = datetime.utcnow()\n # create_trustanchor_keydigest wants an KSKKey, but it is not used in the digest calculation\n _temp_ksk = KSKKey(\n 
description=\"Newly generated key\",\n label=_now.isoformat(),\n key_tag=_key.key_tag,\n algorithm=_key.algorithm,\n valid_from=_now,\n valid_until=_now,\n )\n _domain = \".\"\n _ds = create_trustanchor_keydigest(_temp_ksk, _key, domain=_domain)\n digest = binascii.hexlify(_ds.digest).decode(\"UTF-8\").upper()\n _digest_type = \"2\" # create_trustanchor_keydigest always does SHA256\n logger.info(\n f\"DS record for generated key:\\n\"\n f\"{_domain} IN DS {_key.key_tag} {_key.algorithm.value} {_digest_type} {digest}\\n\"\n f\">> {' '.join(pgp_wordlist(_ds.digest))}\"\n )\n\n return True", "def generateKeys(self, keys_path, minion_id):\n #Change directory to keys path\n os.chdir(keys_path)\n #Give permission to the salt user\n self.console_manager.printRed(\"Giving permission to the salt user\")\n command = ['sudo', 'chmod', 'a+rwx', '.']\n self.console_manager.runCommandFromShell(command)\n #Generate keys\n self.console_manager.printRed(''.join([\"Generating keys for minion id: \", minion_id]))\n command = ['sudo', 'salt-key', ''.join(['--gen-keys=', minion_id])]\n self.console_manager.runCommandFromShell(command)\n #Give permission to the salt user\n self.console_manager.printRed(\"Allowing vagrant to handle private keys\")\n command = ['sudo', 'chmod', 'a+rwx', ''.join([minion_id, '.pub']), ''.join([minion_id, '.pem'])]\n self.console_manager.runCommandFromShell(command)\n #Add public key to the accepted minion folder\n self.console_manager.printRed(\"Copying the minion public key to the salt master public keys folder\")\n command = ['sudo', 'cp', ''.join([minion_id, '.pub']), ''.join(['/var/lib/salt/pki/master/minions/', minion_id])]\n self.console_manager.runCommandFromShell(command)\n command = ['sudo', 'cp', ''.join([minion_id, '.pub']), ''.join(['/etc/salt/pki/master/minions/', minion_id])]\n self.console_manager.runCommandFromShell(command)\n return", "def generate(self, force=False):\n if not self.check_force_generate(force):\n return False\n\n mkdirs(self.path)\n\n command = [openssl, 'genrsa', '-out', self.key_file]\n if self.password:\n command += ['-passout', 'pass:{}'.format(self.password)]\n command += [str(self.key_size)]\n\n self.log.info('Generating RSA key')\n if not run_command(command):\n raise RuntimeError('RSA key generation failed')\n\n if not self.exists():\n raise RuntimeError(\n 'Key generation succeeded but key file does not exist. '\n 'This should not happen', self\n )", "def generate_key(self, filename, size):\n if size != 16 and size != 24 and size != 32:\n raise ValueError(\"AES key size not valid.\")\n key = os.urandom(size)\n self.export_key(filename, key)\n return key", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def gen_pgp_key(name, email, comment=\"generated by sdata\"):\n\n # we can start by generating a primary key. For this example, we'll use RSA, but it could be DSA or ECDSA as well\n key = pgpy.PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 4096)\n\n # we now have some key material, but our new key doesn't have a user ID yet, and therefore is not yet usable!\n uid = pgpy.PGPUID.new(name, comment=comment, email=email)\n\n # now we must add the new user id to the key. 
We'll need to specify all of our preferences at this point\n # because PGPy doesn't have any built-in key preference defaults at this time\n # this example is similar to GnuPG 2.1.x defaults, with no expiration or preferred keyserver\n key.add_uid(uid, usage={KeyFlags.Sign, KeyFlags.EncryptCommunications, KeyFlags.EncryptStorage},\n hashes=[HashAlgorithm.SHA256, HashAlgorithm.SHA384, HashAlgorithm.SHA512, HashAlgorithm.SHA224],\n ciphers=[SymmetricKeyAlgorithm.AES256, SymmetricKeyAlgorithm.AES192, SymmetricKeyAlgorithm.AES128],\n compression=[CompressionAlgorithm.ZLIB, CompressionAlgorithm.BZ2, CompressionAlgorithm.ZIP,\n CompressionAlgorithm.Uncompressed])\n return key", "def gen_key(self, key):\n b_key = self._hash_digest(key)\n return self._hash_val(b_key, lambda x: x)", "def create_key() -> RSA.RsaKey:\n\n return RSA.generate(1024, Crypto.Random.new().read)", "def create_keypair(key_name):\n if os.path.isfile(SSH_FOLDER + key_name + \".pem\"):\n return # Key already created\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n key = ec2.create_key_pair(key_name)\n key.save(SSH_FOLDER)", "def create_rsa_keypair( config_path = config.CONFIG_PATH(), prefix = RSA_KEYPAIR_PREFIX(), keysize = RSA_KEYPAIR_SIZE(), force = False ) :\n\n public_key_path, private_key_path = valid_rsa_keypair_paths( config_path, prefix, force )\n utils.create_directory( config_path, 0700 )\n public_key, private_key = rsa.newkeys( keysize )\n with open( public_key_path, \"w\" ) as stream :\n stream.write( public_key.save_pkcs1() )\n os.chmod( public_key_path, 0644 )\n with open( private_key_path, \"w\" ) as stream :\n stream.write( private_key.save_pkcs1() )\n os.chmod( private_key_path, 0600 )\n return public_key_path, private_key_path", "def generate_key(self)->bytes:\n return os.urandom(32)", "def generate_key():\n # generate random key\n key = get_random_string()\n\n # if it's already taken, generate another\n if EmailManager.objects.filter(key=key).exists():\n return EmailManager.generate_key()\n\n # return it\n return key", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def Generate(size=keyinfo.AES.default_size):\n key_bytes = util.RandBytes(size // 8)\n key_string = util.Base64WSEncode(key_bytes)\n hmac_key = HmacKey.Generate() # use default HMAC-SHA1 key size\n return AesKey(key_string, hmac_key, size)", "def _create_rsa_key_pair(self, length, public_exponent=65537):\n self.logger.info(\n \"Generating an RSA key pair with length: {0}, and \"\n \"public_exponent: {1}\".format(\n length, public_exponent\n )\n )\n try:\n private_key = rsa.generate_private_key(\n public_exponent=public_exponent,\n key_size=length,\n backend=default_backend())\n public_key = private_key.public_key()\n\n private_bytes = private_key.private_bytes(\n serialization.Encoding.DER,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption())\n public_bytes = public_key.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.PKCS1)\n except Exception as e:\n self.logger.exception(e)\n raise exceptions.CryptographicFailure(\n \"An error occurred while generating the RSA key pair. 
\"\n \"See the server log for more information.\"\n )\n\n public_key = {\n 'value': public_bytes,\n 'format': enums.KeyFormatType.PKCS_1,\n 'public_exponent': public_exponent\n }\n private_key = {\n 'value': private_bytes,\n 'format': enums.KeyFormatType.PKCS_8,\n 'public_exponent': public_exponent\n }\n\n return public_key, private_key", "def generate_key_cmd(self, cfg_path=None):\n # TODO: use tempfile\n if cfg_path is None:\n cfg_path = '/tmp/gen-key.cfg'\n self.create_gen_key_cfg_file(cfg_path)\n return '/usr/bin/gpg --batch --gen-key {cfg_path}'.format(cfg_path=cfg_path)", "def generate_key(self, rand = random.SystemRandom()):\n k = rand.randrange(0, self.n - 1)\n return k, self.base_mul(k)" ]
[ "0.71120954", "0.68204904", "0.65850353", "0.6553852", "0.65218014", "0.6443924", "0.6378423", "0.63755655", "0.6358998", "0.6324435", "0.63147116", "0.6309187", "0.62952316", "0.6251726", "0.61868006", "0.61828184", "0.61474425", "0.6133282", "0.61287004", "0.60888094", "0.6040706", "0.60231555", "0.60195357", "0.6008617", "0.5979798", "0.597343", "0.5961485", "0.5926425", "0.5888555", "0.5860322", "0.5832551", "0.58271146", "0.5826103", "0.5816542", "0.5814549", "0.5814375", "0.5811282", "0.580594", "0.5791124", "0.5787104", "0.5783938", "0.5780605", "0.5772091", "0.577154", "0.57712805", "0.5761656", "0.5755106", "0.57416207", "0.5735575", "0.5733245", "0.5733074", "0.57320446", "0.5718721", "0.5714307", "0.5711257", "0.5708222", "0.5707401", "0.5701368", "0.57010907", "0.5681143", "0.5656023", "0.56471545", "0.56464636", "0.56459415", "0.56458044", "0.5638617", "0.5627469", "0.56126213", "0.5609052", "0.559526", "0.5586959", "0.5567825", "0.5563508", "0.5558474", "0.5553827", "0.5553125", "0.5548605", "0.5540845", "0.5537478", "0.5534268", "0.5534091", "0.55316025", "0.55031395", "0.5498549", "0.548971", "0.5485279", "0.547969", "0.5466015", "0.5443588", "0.5441804", "0.5425906", "0.54223436", "0.5415646", "0.54140556", "0.5407447", "0.5395988", "0.53800106", "0.5367588", "0.5365692", "0.5363728" ]
0.8180875
0
Generate a registration ID. Clients should only do this once, at install time.
def generateRegistrationId():
    regId = KeyHelper.getRandomSequence()
    return regId
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_id():\n return uuid4().get_hex()", "def generateID(self):\n\n return str(uuid.uuid1())", "def get_id(self) -> str:\n return self._register_id", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def generate_id():\n\treturn \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def id_generator():\r\n new_id = uuid.uuid4()\r\n return new_id.hex", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n return str( uuid.uuid4() )", "def generate_id():\n return \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def _generate_uuid(self):\n\n return uuid.uuid4()", "def generate_uuid():\n return uuid.uuid4()", "def gen_uuid():\n return str(uuid.uuid4())", "def unique_id() -> str:", "def req_id_generator() -> str:\n # 8 chars long should be long enough, add the 'Generated' prefix to know not to search for this id in the elb logs\n return f'Generated-{str(uuid.uuid4())[:8]}'", "def generateId( self ):\n # try to use the uuid module\n try:\n import uuid\n return uuid.uuid1()\n \n # otherwise, use the random module\n except ImportError:\n import random\n return random.randint(-1000000000000, 1000000000000)", "def userIDGen() :\n\treturn __randomString(8)", "def gen_uuid() -> str:\n return str(uuid4())", "def new_uid():\n return str(uuid.uuid1())[:30]", "def generate_user_id() -> str:\n return 'u' + str((uuid.getnode()))", "def _generate_tracking_number(self):\n return uuid.uuid4().hex.upper()", "def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')", "def _generate_uuid():\n return str(uuid.uuid4())", "def generate_uuid():\n return uuid.uuid4()", "def generate_uuid():\n return uuid.uuid4().hex", "def generate_wallet_id(cls) -> str:\n return str(uuid.uuid4())", "def generate_uuid():\n return f'{uuid.uuid1()}'", "def gen_id(self) -> str:\n self._id += 1\n return str(self._id)", "def create_uid():\n return random_string(5, string.hexdigits.lower())\n # return (\"%x\" % (int(time.time()) * 0x10 % 0x1000000000)\n # + random_string(7, string.hexdigits.lower()))", "def generate_key(self):\n return str(uuid4())", "def _generate_id() -> str:\n return \"\".join(sample(\"abcdefghjkmopqrstuvqxyz\", 16))", "def generate_key():\n return str(uuid.uuid4())", "def generate_device_id():\n\n # TODO this is awful, makes me sad, but for now also makes demoing\n # easier We might want to look into an auto-configuration feature for\n # devices, such that ids are not input manually on devices\n\n _attempts = 0\n generated_id = ''\n while _attempts < 10 and len(generated_id) == 0:\n _attempts += 1\n new_id = create_id()\n if Device.query.filter_by(id=new_id).first() is None:\n LOGGER.debug(f\" Generated a new device id {new_id}\")\n return new_id\n\n LOGGER.error(f\" Failed to generate unique device_id\")\n raise HTTPRequestError(500, \"Failed to generate unique device_id\")", "def generate_product_number():\n return str(uuid.uuid4())", "def _uniq_id():\n return random.getrandbits(64)", "def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return '%d%d' % (int(time.time()), unique_id_increment)", "def get_or_create_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def generate_token():\n return uuid4()", "def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return \"%d%d\" % (int(time.time()), unique_id_increment)", "def 
get_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def generate_uid(length=10):\n uid = uuid.uuid4().hex[0:length]\n return uid", "def generateUID(self):\n global previous_id\n \n id = previous_id\n previous_id += 1\n \n return id", "def get_generator_id() -> str:\n res = os.name + str(os.getpid()) + str(random.randint(-1000, 1000))\n res = hashlib.sha224(res.encode('utf-8')).digest()\n res = base64.b64encode(res).decode('utf-8')\n return res", "def unique_id() -> bytes:", "def generateUUID(): # pylint: disable=C0103\r\n return str(uuid.uuid4())", "def _generate_id(self, context):\n tmp = datetime.datetime.now()\n tmp = tmp.strftime('%Y%m%d%H%M%S%f')\n tmp += context.peer()\n m = hashlib.md5()\n m.update(tmp.encode('utf-8'))\n return str(m.hexdigest())", "def get_unique_id():\n\n return int.from_bytes(os.urandom(3), byteorder='big')", "def unique_id(self):\n return (\n \"a80f3d5b-df3d-4e38-bbb7-1025276830cd\"\n )", "def generate_trackerid():\n\n trackerid = None\n while trackerid is None or \\\n Profile.objects.filter(trackerid=trackerid).exists():\n trackerid = uuid.uuid4().hex\n return trackerid", "def unique_id(self):\n id = \"{}{}{}\".format(\n DOMAIN, self._account, self.sensorName.lower().replace(\" \", \"\")\n )\n return id", "def generate_trackerid():\r\n\r\n trackerid = None\r\n while trackerid is None or \\\r\n Profile.objects.filter(trackerid=trackerid).exists():\r\n trackerid = uuid.uuid4().hex\r\n return trackerid", "def make_id():\n global _simple_id\n\n import uuid\n from ..settings import settings\n\n if settings.simple_ids(False):\n _simple_id += 1\n new_id = _simple_id\n else:\n new_id = uuid.uuid4()\n return str(new_id)", "def build_id():\n return \"test123\"", "def generate_user(self):\n token = str(uuid.uuid4())\n return self.generate_subid(token=token, return_user=True)", "def generate_message_control_id():\n d = datetime.datetime.utcnow()\n # Strip off the decade, ID only has to be unique for 3 years.\n # So now we have a 16 char timestamp.\n timestamp = d.strftime(\"%y%j%H%M%S%f\")[1:]\n # Add 4 chars of uniqueness\n unique = \"\".join(random.sample(alphanumerics, 4))\n return timestamp + unique", "def registration_definition_id(self) -> str:\n return pulumi.get(self, \"registration_definition_id\")", "def generate_project_key():\n return shortuuid.ShortUUID().random(length=32)", "def generate_session_id():\n return utils.get_32bit_random_num()", "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def _unique_id():\n id = \"\"\n for i in xrange(0,8):\n id += choice(ascii_letters)\n return id", "def genShareID(store):\n return unicode(os.urandom(16).encode('hex'), 'ascii')", "def generate_request_id():\n return 'req-%s' % uuid.uuid4()", "def generate_request_id():\n return 'req-%s' % uuid.uuid4()", "def generate_id(urandom=os.urandom, encode=base64.b64encode):\n return encode(urandom(12)).strip()", "def uuid():\n from dallinger.experiment import Experiment\n\n click.echo(Experiment.make_uuid())", "def generate_message_id():\n return str(uuid.uuid1())", "def unique_id(self):\n return f\"{self.device.id}-{self.key}\"", "def _get_unique_id(self):\n now = datetime.now()\n\n u_id = now.second + 60*(now.minute + 60*(now.hour + 
24*(now.day + 31*(now.month + 366*(now.year)))))\n return \"instance\" + str(u_id)", "def unique_id(self) -> str:\n return self.get_unique_id(wallet=self.wallet_id, nonce=self.nonce)", "def guid():\n base_uuid = uuid.uuid4()\n number = base_uuid.int & ((2 ** 20) - 1)\n return base62_encode(number)", "def generate_transaction_id():\r\n return str(int(time.time() * 1000))", "def generate_random_UID(self):\n\t\tUID = random.randint(0, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)\n\t\tprint(f\"Generated random UID {UID}\")\n\t\treturn UUID(int=UID)", "def unique_id(self) -> str:\n return f\"{self._host}_{self._name}_{self._unique_id}\"", "def unique_id() -> bytes:\n ...", "def new_case_id():\n return uuid.uuid4().hex", "def new_case_id():\n return uuid.uuid4().hex", "def create_id_number(self):\n id_number = get_random_string(8).lower()\n if User.objects.filter(id_number=id_number).first():\n self.create_id_number()\n\n return id_number", "def generate_unique_name():\n return 'titanic-' + str(get_mac())", "def _generate(self, hashed = True):\r\n\r\n identifier = str(uuid.uuid4())\r\n identifier = identifier.upper()\r\n if not hashed: return identifier\r\n identifier = legacy.bytes(identifier)\r\n hash = hashlib.sha256(identifier)\r\n identifier = hash.hexdigest()\r\n identifier = identifier.upper()\r\n return identifier", "def createUniqueRatingId():\n #connector = appEngine.connect()\n ratingID = 'r' + str(ceil(time.time()))\n return ratingID", "def get_uuid():\n return str(uuid4())", "def __create_periodic_id() -> str:\n now = datetime.now()\n return now.strftime(\"%m%d%Y%H%M%S%f\")", "def unique_id(self) -> str:\n return \"{}-{}-{}\".format(*self._id)", "def unique_name():\n return \"unique-{0}\".format(uuid.uuid4())", "def generate_id(cls):\n cls._index += 1\n return 'fp_%s' % cls._index", "def generate_fwan_process_id() -> str:\n return str(uuid.uuid4())", "def _generate_id(self, result):\n validation = result.validation\n\n #this is still not optimal - validations may mutate between creation\n #and here\n message = str(type(validation)) + str(validation.__dict__)\n hasher = hashlib.md5()\n hasher.update(message.encode('utf-8'))\n pagerduty_id = hasher.hexdigest()\n\n logger.debug(\"Generated id {} for {}\".format(pagerduty_id, result))\n return pagerduty_id", "def unique_id(self) -> str:\n return f\"{self.wallet_id}{self.WALLET_KEY_POSTFIX}\"", "def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)", "def unique_id(self) -> str:\n return f\"{self._mac}_tracker\"", "def unique_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"unique_identifier\")", "def _generate_order_id():\n current_milli_time = str(int(round(time.time())))\n rand_str = random_string_generator()\n\n return '%s%s' % (rand_str, current_milli_time)", "def create_job_id() -> str:\n return str(uuid.uuid1())", "def generate_random_uid():\n choice = string.ascii_uppercase + string.digits\n return ''.join([random.choice(choice) for _ in range(36)])", "def generate_timer_id():\n\treturn base64.b64encode(os.urandom(30), b\"Qx\").decode(\"ascii\")", "def unique_id(self) -> str:\n return f\"{self._inst.lower()}-{self._sid_data['sid']}_switch-{self._data[self._sid_data['sid_ref']]}\"", "def create_tag_id():\n return uuid.uuid1().int", "def get_uuid():\n\n return str(uuid.uuid4())" ]
[ "0.7785231", "0.7583207", "0.74686027", "0.74049723", "0.7362628", "0.72884035", "0.72758955", "0.72758955", "0.7264104", "0.7262016", "0.72611034", "0.72470003", "0.7220589", "0.721949", "0.7148431", "0.71448946", "0.7102137", "0.7092246", "0.70777285", "0.7075378", "0.7058048", "0.70572597", "0.70404285", "0.7021929", "0.70185554", "0.7004354", "0.6991372", "0.6982666", "0.6977142", "0.6976874", "0.69265956", "0.6922818", "0.6920532", "0.6920021", "0.69151175", "0.6894725", "0.6881062", "0.6872324", "0.68658227", "0.6851346", "0.6846814", "0.6846124", "0.68284935", "0.6819174", "0.68134946", "0.68121684", "0.6811017", "0.68078136", "0.68037605", "0.68033564", "0.6787717", "0.6780371", "0.67772377", "0.67704326", "0.67666054", "0.6738121", "0.6736554", "0.67172754", "0.6714752", "0.6714752", "0.6711141", "0.6695959", "0.66789913", "0.66786873", "0.66786873", "0.6672443", "0.6662801", "0.6655348", "0.6636832", "0.6636472", "0.6617228", "0.6610122", "0.66025835", "0.65934443", "0.65799636", "0.65629315", "0.6561396", "0.6561396", "0.6545792", "0.654485", "0.65413284", "0.6536594", "0.6531077", "0.6527765", "0.65219384", "0.6517499", "0.65172976", "0.65166265", "0.6502527", "0.6494201", "0.64862734", "0.64835685", "0.6476718", "0.64696026", "0.6468187", "0.64579165", "0.6456893", "0.6456785", "0.64560705", "0.6451179" ]
0.8603011
0
Generate a list of PreKeys. Clients should do this at install time, and subsequently any time the list of PreKeys stored on the server runs low. PreKey IDs are shorts, so they will eventually be repeated. Clients should store PreKeys in a circular buffer, so that they are repeated as infrequently as possible. start: the starting PreKey ID, inclusive. count: the number of PreKeys to generate. Returns the list of generated PreKeyRecords.
def generatePreKeys(start, count):
    results = []
    start -= 1
    for i in range(0, count):
        preKeyId = ((start + i) % (Medium.MAX_VALUE - 1)) + 1
        results.append(PreKeyRecord(preKeyId, Curve.generateKeyPair()))
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def gen_keys():", "def create(self, record_count, start_id, lock=None):\n\n message_reference_beginning = self.create_random_string(10)\n\n records = []\n\n for i in range(start_id, record_count + start_id):\n record = self.__create_record(i, message_reference_beginning)\n records.append(record)\n\n return records", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def get_generator(strains, reference_id, start, end):\n primary_keys = []\n vals = range(start, end+1)\n for val in vals:\n for strain in strains:\n primary_keys.append(strain+\"_\"+reference_id+\"_\"+str(val))\n return primary_keys", "def import_to_reids(record_count=8):\n\ttry:\n\t\tconn = redis.Redis(host=HOST,port=PORT,password=PASSWD)\n\texcept:\n\t\tprint 'connection error'\n\t\tsys.exit(0)\n\n\t# add to a set,transaction with pipeline\n\ttrans = conn.pipeline(transaction=True) \n\tset_name = 'activation_code'\n\ttry:\n\t\tfor i in xrange(record_count):\n\t\t\tcode = activation_code_generaor()\n\t\t\ttrans.sadd(set_name,code)\n\t\ttrans.execute() #commit all commands at a time\n\t\t# show the code\n\t\tprint'success,number of keys in a set:',conn.scard(set_name)\n\texcept:\n\t\tprint 'error,rollback'\n\t\tsys.exit(0)", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def keys_fetch(self):\n with self.env.begin(write=False) as txn:\n cursor = txn.cursor()\n tot = txn.stat()['entries']\n i = 0\n\n path = self.db_path\n base_name = self.base_path\n cache_file_path = os.path.join(path, '_cache_' + base_name + '.pkl')\n print('cache_file_path = ', cache_file_path) # DEBUG\n\n if os.path.isfile(cache_file_path):\n self.keys = pickle.load(open(cache_file_path, 'rb'))\n self._num_examples = tot\n else:\n keys = []\n for key, _ in cursor:\n i += 1\n if i % 1000 == 0 or i == tot:\n print('Fetching {:>8d} /{:>8d} keys'.format(i, tot),\n end='\\r')\n keys.append(key)\n print('\\nDone.')\n self._num_examples = tot\n self.keys = np.asarray(keys)\n pickle.dump(self.keys, open(cache_file_path, 'wb'))", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n\n d = HBINCell(self._buf, key_offset, self)\n yield NKRecord(self._buf, d.data_offset(), self)\n key_index += 4", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n\n d = HBINCell(self._buf, key_offset, self)\n yield NKRecord(self._buf, d.data_offset(), self)\n key_index += 8", "def generate_begin(self):\n\t\tlist = []\n\t\tlist += 
self.ks['begin']\n\t\treturn list", "def get_key_set():\n keys = [0] * n_families\n for i in range(n_families):\n keys[i] = get_key(i)\n\n return keys", "def random_key_gen(self, number):\n if (number > len(self.vocables)):\n raise IndexError('Liste enthaelt nicht genuegend Elemente')\n else:\n k_list = list(self.vocables.keys())\n random.shuffle(k_list)\n for k in k_list[:number]:\n yield k", "def made_key(self):\n \n # select a random number from 1 to infinity \n ran_number = random.randint(1,99)\n\n # create a random set based on the first number you chose \n set = xrange(ran_number,28*ran_number,ran_number)\n\n # increase the value of every number in the set \n for item in set:\n item += 3\n Code_Fouad_Teniou.my_key.append(item)\n\n #return a random key \n return Code_Fouad_Teniou.my_key", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "def _get_primary_keys(self, table_name, num_rows):\n primary_key = self.metadata.get_primary_key(table_name)\n primary_key_values = None\n\n if primary_key:\n field = self.metadata.get_fields(table_name)[primary_key]\n\n generator = self.primary_key.get(table_name)\n\n if generator is None:\n if field['type'] != 'id':\n raise ValueError('Only columns with type `id` can be primary keys')\n\n subtype = field.get('subtype', 'integer')\n if subtype == 'integer':\n generator = itertools.count()\n remaining = np.inf\n elif subtype == 'string':\n regex = field.get('regex', r'^[a-zA-Z]+$')\n generator = exrex.generate(regex)\n remaining = exrex.count(regex)\n elif subtype == 'datetime':\n raise NotImplementedError('Datetime ids are not yet supported')\n else:\n raise ValueError('Only `integer` or `string` id columns are supported.')\n\n self.primary_key[table_name] = generator\n self.remaining_primary_key[table_name] = remaining\n\n else:\n remaining = self.remaining_primary_key[table_name]\n\n if remaining < num_rows:\n raise ValueError(\n 'Not enough unique values for primary key of table {}'\n ' to generate {} samples.'.format(table_name, num_rows)\n )\n\n self.remaining_primary_key[table_name] -= num_rows\n primary_key_values = pd.Series([x for i, x in zip(range(num_rows), generator)])\n\n return primary_key, primary_key_values", "def generate_keys(cls, des_key: str) -> List[list]:\n\n keys = []\n des_key = cls.string_to_bit_array(des_key)\n # Apply the initial Permutation on the key\n des_key = cls.permutation_expand(des_key, Tables.PC_1_TABLE)\n # Split it in to LEFT,RIGHT\n left, right = cls.n_split(des_key, 28)\n # Apply the 16 rounds\n for i in range(16):\n # Apply the shift associated with the round (not always 1)\n left, right = cls.shift(left, right, Tables.SHIFT_ARRAY[i])\n # Merge them\n tmp = left + right\n # Apply the Permutation to get the Ki\n keys.append(cls.permutation_expand(tmp, Tables.PC_2_TABLE))\n return keys", "def get_next_keys(self):\n P_List = []\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n P_List.append(int(construct_pass(key, choice)))\n return P_List", "def create_keypairs(self,\n names=None,\n count=1,\n public_key=None,\n check=True):\n names = names or utils.generate_ids(count=count)\n\n keypairs = []\n for name in names:\n keypair = self._client.create(name, public_key=public_key)\n keypairs.append(keypair)\n\n if check:\n self.check_keypairs_presence(keypairs)\n\n for keypair in keypairs:\n if public_key is not None:\n assert_that(keypair.public_key, equal_to(public_key))\n\n return keypairs", "def GetInputFileKeys(version):\n \n if version == 7:\n 
inputfile_keys = ['DynBrkFi','PtfmFile',\n 'TwrFile','FurlFile','BldFile(1)',\n 'BldFile(2)','BldFile(3)','NoiseFile','ADAMSFile',\n 'LinFile']\n \n elif version == 8:\n errStr = 'Keys for FAST 8 have not been coded yet.'\n ValueError(errStr)\n \n else:\n errStr = 'Uncoded version \\\"{:d}\\\".'.format(version)\n ValueError(errStr)\n \n return inputfile_keys", "def _get_keys(self, listOfKeys):\n return self._keys", "def generateDictKeys(string, n,step=1):\n if type(string) != str or type(n) != int:\n raise ValueError('Please input string and integer for first and second argument')\n elif step == 1:\n keylist = [string+str(i) for i in range(n)]\n return keylist\n else:\n keylist = [string+str(i) for i in range(0, n*step, step)]\n return keylist", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def generate_keys(self):\n\n\t\tmin_ext = 1 << self.size_ext - 1\n\t\tmax_ext = 1 << self.size_ext\n\t\t\n\t\t\t\n\t\t# step 1 : chose random primary numbers p and q\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._p = n\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\twhile(n == self._p):\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._q = n\n\n\t\t#step 2 : compute n = pq\n\t\tself.n = self._p * self._q\n\n\t\t#step 3 : compute phi(n)\n\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t#step 4 : chose the exponent\n\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\textension = extension + self.D\n\t\twhile (gcd(self._phi,n) != 1):\n\t\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\tself._d = extension\n\n\t\t#step 5 : compute d (private key)\n\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def __gen_keys__(self):\n if self.seed == b'':\n self.seed = urandom(self.seed_size)\n\n n_prev = Node(hash=hash_factory(data=bytes(self.seed)).digest())\n self.keys.insert(0, n_prev)\n\n for i in range(1, self.l + 1):\n n = Node(hash=hash_factory(data=bytes(n_prev.hash)).digest())\n self.keys.insert(0, n)\n n_prev = n\n\n # Add the decoy nodes as parents of pair nodes.\n # The pair nodes will _always_ be the right child of the decoy nodes.\n for i in range(2, self.l + 1, 2):\n n_pair = self.keys[i] # type: Node\n n_impair_prev = self.keys[i-1] # type: Node\n n_pair.parent = Node(hash=bytes(n_impair_prev.hash))\n n_pair.parent.right_child = n_pair", "def __generate_key_from_list_of(self, list_of_keys):\r\n list_of_keys = list(list_of_keys)\r\n list_of_keys.sort()\r\n return \",\".join(list_of_keys)", "def makePartition(self, count):\n\t\tvalidPart = (nr.random_sample(count) < self.proportion).astype(\"int32\")\n\t\ttrainPart = n.ones(count, dtype=\"int32\") - validPart\n\n\t\treturn self.toIndexes(trainPart), self.toIndexes(validPart)", "def iterkeys(self):\n\n for i in xrange(0, self._limit):\n try:\n self[i]\n yield i\n except KeyError:\n pass", "def prefix_keys(self, prefix, maxkeys=None):\n # TODO: write better documentation: describe purpose, provide example code\n if maxkeys is None:\n maxkeys = len(self)\n\n return wait(self.proto.fwmkeys(prefix, maxkeys))", "def generate_keys(self):\n\n\t\tcondition = False\n\t\t\n\t\t\t\n\t\twhile (not condition) :\n\t\t\t# step 1 : chose random primary numbers p and q\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._p = n\n\t\t\tn = 
generate_prime(self.min_bound,self.max_bound)\n\t\t\twhile(n == self._p):\n\t\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._q = n\n\n\t\t\t#step 2 : compute n = pq\n\t\t\tself.n = self._p * self._q\n\t\t\t\n\t\t\ta = find_invpow(self.n,4) // 3\n\t\t\tcondition = (self._p > self._q) and (self._p < 2 * self._q)\n\t\t\tif (not condition) :\n\t\t\t\tcontinue\n\n\t\t\tprint(\"step one OK\")\n\n\t\t\t#step 3 : compute phi(n)\n\t\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t\t#step 4 : chose the exponent\n\t\t\tn = randint(100,a)\n\t\t\twhile (gcd(self._phi,n) != 1):\n\t\t\t\tn = randint(100,self._phi)\n\t\t\tself._d = n\n\n\t\t\t#step 5 : compute d (private key)\n\t\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\t\tcondition = (self._d < a)\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def getNoOfKeys(self):\n return len(self.__keyList)", "def generate_keys(self, p, q, e):\n d = EucAlg(p, q)\n for i in d:\n if i == 0:\n raise Exception(\"p and q are not relatively prime.\")\n\n n = p*q\n phi_n = (p-1)*(q-1)\n d = EucAlg(e, phi_n)\n\n self._private_key = (d[0],n)\n self.public_key = (e,n)", "def gen_records(self, count=None):\n if not count:\n count = self.num_rec\n tt = time.localtime(time.time())\n addr = None\n for i in range(count):\n logdbg(\"reading record %d of %d\" % (i+1, count))\n addr, record = self.get_record(addr, tt.tm_year, tt.tm_mon)\n yield addr, record", "def nextPrimaryKey( self, table_key=None ):\n\n # Make sure the dictionary key exists and, if not, create with zero as starting value.\n if not table_key in self.primaryKeys:\n self.primaryKeys[ table_key ] = 0\n\n\n # Increment the id.\n self.primaryKeys[ table_key ] += 1\n\n return self.primaryKeys[ table_key ]", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def keys(self):\n return DeviceKeyCollection(client=self)", "def get_n_keys_params(filepaths):\n params = []\n for f in filepaths:\n with open(f) as json_file:\n data = json.load(json_file)\n params.append(int(data['n_keys']))\n return sorted(params)", "def _update_prepend_key(self):\n self.prepend_key -= 1", "def get_capture_keys(pen_id: int, start_date: str, end_date: str, inbound_bucket=INBOUND_BUCKET) -> List:\n\n site_id = PEN_SITE_MAPPING[pen_id]\n dates = get_dates_in_range(start_date, end_date)\n capture_keys = []\n for date in dates:\n print('Getting capture keys for pen_id={}, date={}...'.format(pen_id, date))\n for hour in DAYTIME_HOURS_GMT:\n hr = str(hour).zfill(2)\n s3_prefix = 'environment=production/site-id={}/pen-id={}/date={}/hour={}'.format(site_id, pen_id,\n date, hr)\n\n\n generator = s3.get_matching_s3_keys(inbound_bucket, prefix=s3_prefix,\n subsample=1.0,\n suffixes=['capture.json'])\n\n these_capture_keys = [key for key in generator]\n capture_keys.extend(these_capture_keys)\n\n return capture_keys", "def generate_new_key(self, index):\n new_key = self.chain_key.subkey(index)\n self._key_generated(new_key, index)", "def generate_key(self, rand = random.SystemRandom()):\n k = rand.randrange(0, self.n - 1)\n return k, self.base_mul(k)", "def get_required_parkeys(self, include_reffile_switch=True):\n parkeys = []\n for key in self.parkey:\n if isinstance(key, tuple):\n parkeys += list(key)\n else:\n parkeys.append(key)\n if include_reffile_switch and self._reffile_switch != \"NONE\":\n 
parkeys.append(self._reffile_switch)\n parkeys.extend(list(self.extra_keys))\n parkeys.extend(getattr(self.obs_package, \"extra_keys\", []))\n return parkeys", "def create_data_set():\n data_set = {}\n for index in range(1024):\n size = random.randint(1, 100) #nosec\n key = str(index).encode(\"utf-8\")\n data_set[key] = get_random_bytes(size)\n return data_set", "def keyGen(key):\n def leftShift(keyBitList):\n \"\"\"Perform a circular left shift on the first and second five bits\"\"\"\n shiftedKey = [None] * KeyLength\n shiftedKey[0:9] = keyBitList[1:10]\n shiftedKey[4] = keyBitList[0]\n shiftedKey[9] = keyBitList[5]\n return shiftedKey\n\n # Converts input key (integer) into a list of binary digits\n keyList = [(key & 1 << i) >> i for i in reversed(range(KeyLength))]\n permKeyList = [None] * KeyLength\n for index, elem in enumerate(P10table):\n permKeyList[index] = keyList[elem - 1]\n shiftedOnceKey = leftShift(permKeyList)\n shiftedTwiceKey = leftShift(leftShift(shiftedOnceKey))\n subKey1 = subKey2 = 0\n for index, elem in enumerate(P8table):\n subKey1 += (128 >> index) * shiftedOnceKey[elem - 1]\n subKey2 += (128 >> index) * shiftedTwiceKey[elem - 1]\n return (subKey1, subKey2)", "def create_pem_keys(self):\n self.random_rsa()\n\n return self.keys", "def partition_ids(n_objs: int, table_names: List[str]) -> List[DBObject]:\n\n rev_map: Dict[str, int] = {name: 1 for name in table_names}\n while sum(rev_map.values()) < n_objs:\n rev_map[random.choice(table_names)] += 1\n\n obj_list: List[DBObject] = list()\n for table, n in rev_map.items():\n for _ in range(n):\n obj_list.append(DBObject(len(obj_list), table))\n\n return obj_list", "def get_keys(self):\n bucket = self.resource.Bucket(self.bucketname)\n return [key.key for key in bucket.objects.all()]", "async def get_keys(self, collection):\n raise NotImplementedError", "def _iter(self, key, count, increment=1):\n key %= self.size\n while count > 0:\n try:\n yield self.db[key]\n except KeyError:\n # This shouldn't happen, but there's really nothing we can do if it does.\n # Skip over the damaged part of our database, ignoring the missing item.\n pass\n key = (key + increment) % self.size\n count -= 1", "def get_random_inchikeys(inchikey_list, train_val_test_split_fractions):\n random.shuffle(inchikey_list)\n\n train_num = int(train_val_test_split_fractions.train * len(inchikey_list))\n val_num = int(train_val_test_split_fractions.validation * len(inchikey_list))\n\n return TrainValTestInchikeys(inchikey_list[:train_num],\n inchikey_list[train_num:train_num + val_num],\n inchikey_list[train_num + val_num:])", "def allocate_ids(self, project_id, keys):\n request_pb = _datastore_pb2.AllocateIdsRequest(keys=keys)\n return _rpc(\n self.client._http,\n project_id,\n \"allocateIds\",\n self.client._base_url,\n self.client._client_info,\n request_pb,\n _datastore_pb2.AllocateIdsResponse,\n )", "def __iter__(self):\n with SessionContext(self.SessionClass) as session:\n keys = session.query(PAW2_DBObject.key)\n keys = [c[0] for c in keys]\n random.shuffle(keys)\n return keys.__iter__()", "def assign_nice_keys(self):\n\n print \"Assigning nice_key values to new documents on {0}...\".format(self.source_client)\n\n empty_nice_keys = self.source_client.find({\"nice_key\": {\"$exists\": False}}, {\"nice_key\": 1})\n\n total_empty_nice_keys = empty_nice_keys.count()\n\n if total_empty_nice_keys:\n\n print \"{0} empty nice key docs found\".format(total_empty_nice_keys)\n progress_report = \"PROCESSED {0}/{1}\".format(\"{0}\", total_empty_nice_keys)\n\n 
for ct, doc in enumerate(empty_nice_keys):\n\n nice_key = self.generate_nice_key()\n\n if nice_key:\n\n self.update_document_nice_key(doc, nice_key)\n\n elif nice_key is None:\n\n raise Exception(\"FAILED TO GENERATE KEY on doc {0} with ObjectId {1}\".format(ct, doc[\"_id\"]))\n\n if (ct % 10000 == 0):\n\n print progress_report.format(ct + 1)\n\n print progress_report.format(empty_nice_keys.count())", "def iterkeys(self):\n self.proto.iterinit()\n try:\n while True:\n yield wait(self.proto.iternext())\n except TyrantError:\n pass", "def input_to_hash(self, keys):\n basic_keys = []\n for i, key in enumerate(keys):\n s = ''\n #print(max(key), min(key))\n for val in key:\n s += \"{:04x}\".format(val)\n basic_keys.append(s)\n return basic_keys", "def keys(self) -> List:\n pass", "def iter_keys(self, search, itersize=None, client=None, version=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=False)\r\n\r\n pattern = self.make_key(search, version=version)\r\n cursor = b\"0\"\r\n\r\n while True:\r\n cursor, data = client.scan(cursor, match=pattern, count=itersize)\r\n\r\n for item in data:\r\n item = smart_text(item)\r\n yield self.reverse_key(item)\r\n\r\n if cursor == b\"0\":\r\n break", "def get_init_import_records(self) -> list[ImportRecord]:\n import_records: set[ImportRecord] = set()\n import_records.add(\n ImportRecord(\n ImportString.parent() + ImportString(ServiceModuleName.client.name),\n self.client.name,\n )\n )\n if self.service_resource:\n import_records.add(\n ImportRecord(\n ImportString.parent() + ImportString(ServiceModuleName.service_resource.name),\n self.service_resource.name,\n )\n )\n for waiter in self.waiters:\n import_records.add(\n ImportRecord(\n ImportString.parent() + ImportString(ServiceModuleName.waiter.name),\n waiter.name,\n )\n )\n for paginator in self.paginators:\n import_records.add(\n ImportRecord(\n ImportString.parent() + ImportString(ServiceModuleName.paginator.name),\n paginator.name,\n )\n )\n\n return sorted(import_records)", "def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[T_str]:\n ...", "def Keys(self) -> _n_1_t_4:", "def ingest(self, secrecies, ncount=1, ncode=coring.MtrDex.Ed25519_Seed,\n dcode=coring.MtrDex.Blake3_256,\n algo=Algos.salty, salt=None, stem=None, tier=None,\n rooted=True, transferable=True, temp=False):\n pidx, rootSalt, rootTier = self.setup() # pidx, salt, tier for ingested sequence\n\n # configure parameters for creating new keys after ingested sequence\n if rooted and salt is None: # use root salt instead of random salt\n salt = rootSalt\n\n if rooted and tier is None: # use root tier as default\n tier = rootTier\n\n creator = Creatory(algo=algo).make(salt=salt, stem=stem, tier=tier)\n\n dt = \"\"\n pubs = []\n oridx = 0\n okidx = 0\n cridx = 0\n ckidx = 0\n ridx = 0\n kidx = 0\n verferies = [] # list of lists of verfers\n first = True\n secrecies = deque(secrecies)\n while secrecies:\n csecrets = secrecies.popleft() # current\n csigners = [coring.Signer(qb64=secret, transferable=transferable)\n for secret in csecrets]\n csize = len(csigners)\n verferies.append([signer.verfer for signer in csigners])\n\n if first:\n pp = PrePrm(pidx=pidx,\n algo=algo,\n salt=creator.salt,\n stem=creator.stem,\n tier=creator.tier)\n pre = csigners[0].verfer.qb64b\n result = self.keeper.putPre(key=pre, val=pre)\n if not result:\n raise ValueError(\"Already incepted pre={}.\".format(pre.decode(\"utf-8\")))\n\n result = 
self.keeper.putPrm(key=pre, val=json.dumps(asdict(pp)).encode(\"utf-8\"))\n if not result:\n raise ValueError(\"Already incepted prm for pre={}.\".format(pre.decode(\"utf-8\")))\n\n self.setPidx(pidx + 1) # increment for next inception\n first = False\n\n for signer in csigners: # store secrets (private key val keyed by public key)\n self.keeper.putPri(key=signer.verfer.qb64b, val=signer.qb64b)\n\n self.keeper.putPubs(key=riKey(pre, ri=ridx),\n val=json.dumps([signer.verfer.qb64 for signer in csigners]).encode(\"utf-8\"))\n\n odt = dt\n dt = helping.nowIso8601()\n opubs = pubs\n pubs = [signer.verfer.qb64 for signer in csigners]\n okidx = ckidx # old kidx\n oridx = cridx # old ridx\n ckidx = kidx # current kidx\n cridx = ridx # currrent ridx\n ridx += 1 # next ridx\n kidx += csize # next kidx\n\n\n # create nxt signers after ingested signers\n nsigners = creator.create(count=ncount, code=ncode,\n pidx=pidx, ridx=ridx, kidx=kidx,\n transferable=transferable, temp=temp)\n\n digers = [coring.Diger(ser=signer.verfer.qb64b, code=dcode) for signer in nsigners]\n\n for signer in nsigners: # store secrets (private key val keyed by public key)\n self.keeper.putPri(key=signer.verfer.qb64b, val=signer.qb64b)\n\n self.keeper.putPubs(key=riKey(pre, ri=ridx),\n val=json.dumps([signer.verfer.qb64 for signer in nsigners]).encode(\"utf-8\"))\n\n csith = \"{:x}\".format(max(1, math.ceil(len(csigners) / 2)))\n cst = coring.Tholder(sith=csith).sith\n\n nsith = \"{:x}\".format(max(0, math.ceil(len(nsigners) / 2)))\n nst = coring.Tholder(sith=nsith).sith\n\n dt = helping.nowIso8601()\n old=PubLot(pubs=opubs, ridx=oridx, kidx=okidx, st='0', dt=odt)\n new=PubLot(pubs=[signer.verfer.qb64 for signer in csigners],\n ridx=cridx, kidx=ckidx, st=cst, dt=dt)\n nxt=PubLot(pubs=[signer.verfer.qb64 for signer in nsigners],\n ridx=ridx, kidx=kidx, st=nst, dt=dt)\n\n ps = PreSit(old=old, new=new, nxt=nxt)\n result = self.keeper.setSit(key=pre, val=json.dumps(asdict(ps)).encode(\"utf-8\"))\n if not result:\n raise ValueError(\"Problem updating pubsit db for pre={}.\".format(pre))\n\n return (verferies, digers)", "def keys_sorted_by_frequency(self, cutoff=100):\n return [key for key, _ in self.counter.most_common()][:cutoff]", "def create_DES_subkeys(key):\r\n # permutation with the original key\r\n fullKey = bit_permutation(key, PC1)\r\n\r\n #we split the key into right and left part\r\n lprev, rprev = fullKey[:28], fullKey[28:]\r\n\r\n nFinalBlock = []\r\n #We now proceed through 16 iterations, for 1<=n<=16\r\n for n in range (16):\r\n lPart, rPart = left_shift_rot(lprev, key_shifts[n]), left_shift_rot(rprev, key_shifts[n])\r\n lprev, rprev = lPart, rPart\r\n nFinalBlock += [\"\".join([lPart, rPart])]\r\n\r\n #We now form the keys Kn, for 1<=n<=16, by applying the following permutation table to each of the concatenated pairs rPart, lPart\r\n nPrimeFinalBlock = []\r\n for n in range(16):\r\n nPrimeFinalBlock += [bit_permutation(nFinalBlock[n], PC2)]\r\n\r\n return nPrimeFinalBlock", "def __create_beginning_of_counterpoint(self) -> List[LineElement]:\n start_note = self.counterpoint_specifications['start_note']\n start_element = LineElement(\n self.scale.get_element_by_note(start_note),\n self.counterpoint_specifications['start_pause_in_eighths'],\n N_EIGHTHS_PER_MEASURE\n )\n counterpoint = [start_element]\n return counterpoint", "def _gpg_keys(self) -> ListKeys:\n return self.gpg.list_keys()", "def keys(self):\n return self._sequence", "def generate_valid_keys():\n valid_keys = []\n for minimum, maximum in RANGES:\n for i in 
range(ord(minimum), ord(maximum) + 1):\n valid_keys.append(chr(i))\n return valid_keys", "def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")", "def _Next(self, count=None):\n if count is not None and (not isinstance(count, (int, long)) or count <= 0):\n raise datastore_errors.BadArgumentError(\n 'Argument to _Next must be an int greater than 0; received %s (a %s)' %\n (count, typename(count)))\n\n if self.__buffer:\n if count is None:\n entity_list = self.__buffer\n self.__buffer = []\n return entity_list\n elif count <= len(self.__buffer):\n entity_list = self.__buffer[:count]\n del self.__buffer[:count]\n return entity_list\n else:\n entity_list = self.__buffer\n self.__buffer = []\n count -= len(entity_list)\n else:\n entity_list = []\n\n\n if not self.__more_results:\n return entity_list\n\n req = datastore_pb.NextRequest()\n if count is not None:\n req.set_count(count)\n req.mutable_cursor().CopyFrom(self.__cursor)\n result = datastore_pb.QueryResult()\n try:\n apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Next', req, result)\n except apiproxy_errors.ApplicationError, err:\n raise _ToDatastoreError(err)\n\n return entity_list + self._ProcessQueryResult(result)", "def initialize(self, keys: List[str]):", "def keys(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None):\n return itertools.imap(ITEMGETTER_1,\n self.pairs(args, lo, hi, reverse, max, include, txn))", "def _make_key(self, record_dict: Dict[str, Any]) -> int:\n return self._keys.setdefault(frozenset(record_dict.keys()), len(self._keys))", "def keys(self, pattern=\"*\"):\n lenOfPrefix = len(self.appendKeys(\"\"))\n return [key[lenOfPrefix:] for key in\n self.redis.keys(self.appendKeys(pattern))]", "def _id_seq(self):\n return list(self.keys())", "def return_least_keys(self,\r\n keyset,\r\n numberof=0,\r\n override=False,\r\n no_allcaps=True,\r\n add_number=False):\r\n\r\n if override:\r\n return keyset\r\n if numberof == 0:\r\n numberof = self.default_dict['numberof']\r\n if not keyset:\r\n return []\r\n freq_list = self.order_keys(keyset)\r\n freq_list = [a_temp[0]+self.show_key_freq*add_number\r\n *(a_temp[1]>0)*(' ('+str(a_temp[1])+')')\r\n for a_temp in freq_list][0 : numberof]\r\n if no_allcaps and len(freq_list) > 3:\r\n freq_list = [a_temp for a_temp\r\n in freq_list\r\n if not a_temp.isupper() and not '@' in a_temp]+[a_temp for a_temp\r\n in freq_list if a_temp.isupper() or not '@' in a_temp]\r\n## freq_list = sorted(freq_list, key=lambda x_temp: len(x_temp))\r\n## freq_list.reverse()\r\n\r\n return freq_list", "def PCTSignatures_generateInitPoints(initPoints, count, pointDistribution): # real signature unknown; restored from __doc__\n pass", "def create_ssh_keys(self):\n self.random_ssh()\n\n return self.keys", "def keys(self):\n sql = u\"\"\"\n SELECT `key` FROM `{table}` WHERE 1\n \"\"\".format(table=self.name)\n\n for row in self.conn.execute(sql):\n yield row['key']", "def get_pubkey_ids(self, addr):\n\n if len(self.pubkeys) > 0 and self.pubkeys[-1].has_key('fingerprint') \\\n and self.pubkeys[-1].has_key('uids'):\n\n pubkey_ids = []\n # compile pattern before use for better performance\n RCPT_RE = re.compile(addr)\n for k in self.pubkeys:\n for uid in k['uids']:\n match = RCPT_RE.search(uid)\n if match is not None:\n # check for key expiration\n if k['expires'] == '':\n pubkey_ids.append(k['fingerprint'][-16:])\n elif (time()+60) < float(k['expires']):\n pubkey_ids.append(k['fingerprint'][-16:])\n break\n return pubkey_ids", "def 
_getKeyList(self):\n return LinkedList(InternalRack(self, 1))", "def _reset_primary_keys_generators(self):\n self.primary_key = dict()\n self.remaining_primary_key = dict()", "def generate_rows(n):\n for i in range(n):\n yield [\n # seq\n i,\n # guid-like id\n hashlib.sha224(bytes(i)).hexdigest(),\n # seq\n i,\n # seq\n i,\n # cc_number \n fake.credit_card_number(card_type=None),\n # expire_date\n fake.date_between('-6y', '+0y').strftime(\"%m/%d/%Y\"),\n # billing_address\n fake.address(),\n ]", "def keys(self, search, version=None, client=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=False)\r\n\r\n pattern = self.make_key(search, version=version)\r\n try:\r\n encoding_map = [smart_text(k) for k in client.keys(pattern)]\r\n return [self.reverse_key(k) for k in encoding_map]\r\n except ConnectionError:\r\n raise ConnectionInterrupted(connection=client)", "def iterkeys(self, essid):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.key)\n q = q.join(PYR2_DBObject).join(ESSID_DBObject)\n q = q.filter(ESSID_DBObject.essid == essid)\n keys = q.all()\n return (c[0] for c in keys)", "def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n return self._partition_keys", "def __init__(self, path, number_keys=1):\n\n self.path = path\n self.keyring = []\n if os.path.exists(path):\n self.keyring = read_keys(path)\n else:\n for n in range(number_keys):\n key = generate_key(generate_random())\n self.keyring.append(key)\n write_keys(path, self.keyring)", "def generate_key(self, size):\n key = bytearray()\n for i in range(0,size):\n random_byte = ord(os.urandom(1))\n key.append(random_byte)\n return key", "def get_key_list(self, email=\"\"):\n\t\tif email:\n\t\t\twhere_clause = \" where email = '%s'\" % email\n\t\telse:\n\t\t\twhere_clause = \"\"\n\n\t\treturn self.app.db.query(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\tapi_key,\n\t\t\t\towner,\n\t\t\t\tapp_name,\n\t\t\t\temail,\n\t\t\t\turl,\n\t\t\t\tcreated\n\t\t\tfrom\n\t\t\t\tapi_keys\n\t\t\t%s\n\t\t\t\"\"\" % where_clause)", "def list(self, path, filename=None, start=None, stop=None, recursive=False, directories=False):\n storageScheme, keys = self.getkeys(\n path, filename=filename, directories=directories, recursive=recursive)\n keys = [storageScheme + \":///\" + key.bucket.name + \"/\" + key.name for key in keys]\n keys.sort()\n keys = select(keys, start, stop)\n return keys", "def Keys(self) -> NameObjectCollectionBase.KeysCollection:", "def get_rowkeys(\n table_instance, rowkeys: List[str], sep: str = \"#\",\n) -> List[models.RowModelOdd]:\n row_model = [_get_single_row(table_instance, rowkey, sep=\":\") for rowkey in rowkeys]\n\n return row_model", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n d = HBINCell(self._buf, key_offset, self)\n\n try:\n for k in d.child().keys():\n yield k\n except RegistryStructureDoesNotExist:\n raise ParseException(\"Unsupported subkey list encountered.\")\n\n key_index += 4", "def keys():", "def getNumberOfKeys(self) -> int:\n ...", "def generate_brainpool_curves(count: int, p: ZZ, initial_seed: str) -> SimulatedCurves:\n simulated_curves = SimulatedCurves(\"brainpool\", p.nbits(), initial_seed, count)\n curve = Brainpool(initial_seed, p)\n b_seed = None\n for _ in range(count):\n if curve.not_defined():\n curve.set_a()\n if not 
curve.check_a():\n curve.seed_update()\n curve.clear()\n continue\n b_seed = increment_seed(curve.seed())\n curve.set_b(b_seed)\n if not curve.check_b():\n b_seed = increment_seed(b_seed)\n continue\n if not curve.secure():\n curve.set_seed(increment_seed(b_seed))\n curve.clear()\n continue\n curve.generate_generator(b_seed)\n curve.compute_properties()\n simulated_curves.add_curve(curve)\n curve = Brainpool(curve.seed(), p)\n curve.seed_update()\n\n return simulated_curves", "def process_preqs(need_to_load):\n\n n_to_load = 0\n while n_to_load!=len(need_to_load): # i.e. unchanged\n n_to_load=len(need_to_load)\n new_need_to_load = need_to_load.copy()\n for to_load in need_to_load:\n if to_load in preqs_dict:\n all_to_add = preqs_dict[to_load]\n if isinstance(all_to_add,list):\n new_need_to_load.update(all_to_add)\n else:\n new_need_to_load.add(all_to_add)\n need_to_load = new_need_to_load\n\n return need_to_load", "def get_all( # type: ignore\n self,\n limit: t.Optional[int] = None, # dead: disable\n offset: t.Optional[int] = None, # dead: disable\n ) -> t.List[KeyValueEntity]:\n return [KeyValueEntity(uuid=k, val=v) for k, v in self._data.items()]", "def fetch(self, n: int) -> t.List[Record]:\n self._buffer(n)\n return [\n self._record_buffer.popleft()\n for _ in range(min(n, len(self._record_buffer)))\n ]", "def get_next_conf_keys(self):\n C_List = []\n for key in self.Poss_Tree:\n key_c = int(str(key)[-1])\n for choice in self.Poss_Tree[key]:\n if choice == key_c:\n C_List.append(int(construct_pass(key, choice)))\n return C_List" ]
[ "0.6100436", "0.57589746", "0.5668012", "0.56462705", "0.5632386", "0.55703026", "0.5561076", "0.5519232", "0.5507431", "0.5470912", "0.546842", "0.54056185", "0.52409744", "0.5236303", "0.5193606", "0.5192819", "0.51398957", "0.50818324", "0.5079338", "0.5055663", "0.5039773", "0.50224555", "0.49965414", "0.4986866", "0.4963776", "0.49413583", "0.49408898", "0.49352106", "0.4926059", "0.49246648", "0.49122408", "0.48962703", "0.48962343", "0.48765165", "0.48655427", "0.4851234", "0.48471648", "0.48468205", "0.48165303", "0.48132", "0.48066506", "0.47986263", "0.47919255", "0.4777471", "0.47654858", "0.47607404", "0.47584006", "0.47546527", "0.47506744", "0.47386247", "0.4731208", "0.47271734", "0.4719986", "0.47132283", "0.47030202", "0.46975565", "0.46954826", "0.4695053", "0.46929777", "0.46882325", "0.46867502", "0.46772876", "0.46766642", "0.4675927", "0.46743065", "0.46676078", "0.46478742", "0.46414292", "0.4639307", "0.4635114", "0.46342966", "0.46319056", "0.4631828", "0.4627521", "0.46257076", "0.46244994", "0.46222025", "0.4618213", "0.4615191", "0.46117735", "0.46077874", "0.46055046", "0.46008974", "0.45989573", "0.45963585", "0.4591236", "0.45825478", "0.45822784", "0.45775297", "0.4576862", "0.45732227", "0.45677522", "0.45663857", "0.45610926", "0.45577574", "0.4557564", "0.45518163", "0.45466468", "0.45463493", "0.45458618" ]
0.87624663
0